diff --git a/.github/workflows/build-ci-container-windows.yml b/.github/workflows/build-ci-container-windows.yml index 167e7cf06b3b2..14c349b1b2fe5 100644 --- a/.github/workflows/build-ci-container-windows.yml +++ b/.github/workflows/build-ci-container-windows.yml @@ -44,7 +44,7 @@ jobs: run: | docker save ${{ steps.vars.outputs.container-name-tag }} > ${{ steps.vars.outputs.container-filename }} - name: Upload container image - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: container path: ${{ steps.vars.outputs.container-filename }} diff --git a/.github/workflows/build-ci-container.yml b/.github/workflows/build-ci-container.yml index 67f35fd30701f..01f1b8dc4f990 100644 --- a/.github/workflows/build-ci-container.yml +++ b/.github/workflows/build-ci-container.yml @@ -64,7 +64,7 @@ jobs: podman save ${{ steps.vars.outputs.container-name-agent-tag }} > ${{ steps.vars.outputs.container-agent-filename }} - name: Upload container image - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: container-${{ matrix.arch }} path: "*.tar" diff --git a/.github/workflows/build-metrics-container.yml b/.github/workflows/build-metrics-container.yml index cadcaa9a42e8f..69b571575f40c 100644 --- a/.github/workflows/build-metrics-container.yml +++ b/.github/workflows/build-metrics-container.yml @@ -49,7 +49,7 @@ jobs: run: | podman save ${{ steps.vars.outputs.container-name-tag }} > ${{ steps.vars.outputs.container-filename }} - name: Upload Container Image - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: container path: ${{ steps.vars.outputs.container-filename }} diff --git a/.github/workflows/check-ci.yml 
b/.github/workflows/check-ci.yml index 7e8c15696e344..f18a69c192ee9 100644 --- a/.github/workflows/check-ci.yml +++ b/.github/workflows/check-ci.yml @@ -26,7 +26,7 @@ jobs: with: sparse-checkout: .ci - name: Setup Python - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: 3.13 cache: 'pip' diff --git a/.github/workflows/ci-post-commit-analyzer.yml b/.github/workflows/ci-post-commit-analyzer.yml index 7d37b900d7909..49cf4100dd71c 100644 --- a/.github/workflows/ci-post-commit-analyzer.yml +++ b/.github/workflows/ci-post-commit-analyzer.yml @@ -44,7 +44,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: # A full build of llvm, clang, lld, and lldb takes about 250MB # of ccache space. 
There's not much reason to have more than this, @@ -87,7 +87,7 @@ jobs: scan-build --generate-index-only build/analyzer-results - name: Upload Results - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: analyzer-results diff --git a/.github/workflows/commit-access-review.yml b/.github/workflows/commit-access-review.yml index a7be81b0e2da5..734dc212fa648 100644 --- a/.github/workflows/commit-access-review.yml +++ b/.github/workflows/commit-access-review.yml @@ -28,7 +28,7 @@ jobs: python3 .github/workflows/commit-access-review.py $GITHUB_TOKEN - name: Upload Triage List - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: triagers path: triagers.log diff --git a/.github/workflows/containers/github-action-ci/Dockerfile b/.github/workflows/containers/github-action-ci/Dockerfile index 892fc9005de85..dc0c9cabc7f01 100644 --- a/.github/workflows/containers/github-action-ci/Dockerfile +++ b/.github/workflows/containers/github-action-ci/Dockerfile @@ -1,7 +1,7 @@ -FROM docker.io/library/ubuntu:24.04 as base +FROM docker.io/library/ubuntu:24.04 AS base ENV LLVM_SYSROOT=/opt/llvm -FROM base as stage1-toolchain +FROM base AS stage1-toolchain ENV LLVM_VERSION=21.1.1 RUN apt-get update && \ @@ -37,7 +37,7 @@ RUN cmake -B ./build -G Ninja ./llvm \ RUN ninja -C ./build stage2-clang-bolt stage2-install-distribution && ninja -C ./build install-distribution -FROM base as ci-container +FROM base AS ci-container COPY --from=stage1-toolchain $LLVM_SYSROOT $LLVM_SYSROOT @@ -62,6 +62,7 @@ RUN apt-get update && \ # Having a symlink from python to python3 enables code sharing between # the Linux and Windows pipelines. 
python3-pip \ + python3-venv \ file \ tzdata \ python-is-python3 && \ @@ -97,7 +98,7 @@ RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers USER gha WORKDIR /home/gha -FROM ci-container as ci-container-agent +FROM ci-container AS ci-container-agent ENV GITHUB_RUNNER_VERSION=2.328.0 diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 8cdd39c164cca..b5f3413fe3b6b 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -95,9 +95,9 @@ jobs: workflow: - '.github/workflows/docs.yml' - name: Setup Python env - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: '3.11' + python-version: '3.13' cache: 'pip' cache-dependency-path: 'llvm/docs/requirements-hashed.txt' - name: Install python dependencies @@ -209,7 +209,7 @@ jobs: mkdir built-docs/flang cp -r flang-build/docs/* built-docs/flang/ - name: Upload docs - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: docs-output path: built-docs/ diff --git a/.github/workflows/email-check.yaml b/.github/workflows/email-check.yaml index 9390fba4d4e3b..981c6fa62cb19 100644 --- a/.github/workflows/email-check.yaml +++ b/.github/workflows/email-check.yaml @@ -39,7 +39,7 @@ jobs: [{"body" : "$COMMENT"}] EOF - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: workflow-args diff --git a/.github/workflows/gha-codeql.yml b/.github/workflows/gha-codeql.yml index efb8143877c4e..63388ebc706bd 100644 --- a/.github/workflows/gha-codeql.yml +++ b/.github/workflows/gha-codeql.yml @@ -29,9 +29,9 @@ jobs: sparse-checkout: | .github/ - name: Initialize CodeQL - uses: 
github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 + uses: github/codeql-action/init@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 with: languages: actions queries: security-extended - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3.30.3 + uses: github/codeql-action/analyze@303c0aef88fc2fe5ff6d63d3b1596bfd83dfa1f9 # v3.30.4 diff --git a/.github/workflows/hlsl-test-all.yaml b/.github/workflows/hlsl-test-all.yaml index 72cbbe2b7dded..dcb852312d41a 100644 --- a/.github/workflows/hlsl-test-all.yaml +++ b/.github/workflows/hlsl-test-all.yaml @@ -80,7 +80,7 @@ jobs: ninja check-hlsl-unit ninja ${{ inputs.TestTarget }} - name: Publish Test Results - uses: EnricoMi/publish-unit-test-result-action/macos@170bf24d20d201b842d7a52403b73ed297e6645b # v2 + uses: EnricoMi/publish-unit-test-result-action/macos@3a74b2957438d0b6e2e61d67b05318aa25c9e6c6 # v2.20.0 if: always() && runner.os == 'macOS' with: comment_mode: off diff --git a/.github/workflows/issue-write.yml b/.github/workflows/issue-write.yml index db9389b6afe53..26cd60c070251 100644 --- a/.github/workflows/issue-write.yml +++ b/.github/workflows/issue-write.yml @@ -40,7 +40,7 @@ jobs: - name: 'Comment on PR' if: steps.download-artifact.outputs.artifact-id != '' - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | diff --git a/.github/workflows/libc-fullbuild-tests.yml b/.github/workflows/libc-fullbuild-tests.yml index 8967cd0949c11..3a048aeb9405b 100644 --- a/.github/workflows/libc-fullbuild-tests.yml +++ b/.github/workflows/libc-fullbuild-tests.yml @@ -61,7 +61,7 @@ jobs: # Do not use direct GHAC access even though it is supported by sccache. GHAC rejects # frequent small object writes. 
- name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: 1G key: libc_fullbuild_${{ matrix.c_compiler }} diff --git a/.github/workflows/libc-overlay-tests.yml b/.github/workflows/libc-overlay-tests.yml index 7154946ac5c3d..df9a20dce8eae 100644 --- a/.github/workflows/libc-overlay-tests.yml +++ b/.github/workflows/libc-overlay-tests.yml @@ -51,7 +51,7 @@ jobs: # Do not use direct GHAC access even though it is supported by sccache. GHAC rejects # frequent small object writes. - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: 1G key: libc_overlay_build_${{ matrix.os }}_${{ matrix.compiler.c_compiler }} diff --git a/.github/workflows/libclang-abi-tests.yml b/.github/workflows/libclang-abi-tests.yml index d53a2f306afa2..5ccf976848197 100644 --- a/.github/workflows/libclang-abi-tests.yml +++ b/.github/workflows/libclang-abi-tests.yml @@ -131,7 +131,7 @@ jobs: sed -i 's/LLVM_[0-9]\+/LLVM_NOVERSION/' $lib-${{ matrix.ref }}.abi done - name: Upload ABI file - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: ${{ matrix.name }} path: '*${{ matrix.ref }}.abi' @@ -165,7 +165,7 @@ jobs: done - name: Upload ABI Comparison if: always() - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: compat-report-${{ github.sha }} path: compat_reports/ diff --git a/.github/workflows/libclang-python-tests.yml b/.github/workflows/libclang-python-tests.yml index e168928325561..8fb8cec3b4f00 100644 --- a/.github/workflows/libclang-python-tests.yml +++ 
b/.github/workflows/libclang-python-tests.yml @@ -34,11 +34,11 @@ jobs: steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup Python - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: ${{ matrix.python-version }} - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: 2G key: spirv-ubuntu-24.04 diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml index 2e6ff7f91b6fc..5fe2ffbf58b43 100644 --- a/.github/workflows/libcxx-build-and-test.yaml +++ b/.github/workflows/libcxx-build-and-test.yaml @@ -60,7 +60,7 @@ jobs: env: CC: ${{ matrix.cc }} CXX: ${{ matrix.cxx }} - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: ${{ matrix.config }}-${{ matrix.cxx }}-results @@ -105,7 +105,7 @@ jobs: env: CC: ${{ matrix.cc }} CXX: ${{ matrix.cxx }} - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() # Upload artifacts even if the build or test suite fails with: name: ${{ matrix.config }}-${{ matrix.cxx }}-results @@ -169,7 +169,7 @@ jobs: env: CC: clang-22 CXX: clang++-22 - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: ${{ matrix.config }}-results @@ -223,7 +223,7 @@ jobs: source .venv/bin/activate python -m pip install psutil bash libcxx/utils/ci/run-buildbot ${{ matrix.config }} - - uses: 
actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() # Upload artifacts even if the build or test suite fails with: name: macos-${{ matrix.config }}-results diff --git a/.github/workflows/libcxx-build-containers.yml b/.github/workflows/libcxx-build-containers.yml index cbaa8e0f65129..312cb47fc3d93 100644 --- a/.github/workflows/libcxx-build-containers.yml +++ b/.github/workflows/libcxx-build-containers.yml @@ -55,7 +55,7 @@ jobs: TAG: ${{ github.sha }} - name: Log in to GitHub Container Registry - uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0 with: registry: ghcr.io username: ${{ github.actor }} diff --git a/.github/workflows/libcxx-check-generated-files.yml b/.github/workflows/libcxx-check-generated-files.yml index f338bd6952779..d34b6a79556d1 100644 --- a/.github/workflows/libcxx-check-generated-files.yml +++ b/.github/workflows/libcxx-check-generated-files.yml @@ -15,7 +15,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Install dependencies - uses: aminya/setup-cpp@17c11551771948abc5752bbf3183482567c7caf0 # v1.1.1 + uses: aminya/setup-cpp@a276e6e3d1db9160db5edc458e99a30d3b109949 # v1.7.1 with: clangformat: 17.0.1 ninja: true diff --git a/.github/workflows/libcxx-run-benchmarks.yml b/.github/workflows/libcxx-run-benchmarks.yml index 17a97df029ba5..0379a0a1f857d 100644 --- a/.github/workflows/libcxx-run-benchmarks.yml +++ b/.github/workflows/libcxx-run-benchmarks.yml @@ -35,7 +35,7 @@ jobs: steps: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: '3.10' + python-version: '3.13' - name: Extract information from the PR id: vars diff --git a/.github/workflows/llvm-bugs.yml b/.github/workflows/llvm-bugs.yml index 5470662c97628..7d42abfadde7b 100644 
--- a/.github/workflows/llvm-bugs.yml +++ b/.github/workflows/llvm-bugs.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-24.04 if: github.repository == 'llvm/llvm-project' steps: - - uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4.2.0 + - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 with: node-version: 18 check-latest: true diff --git a/.github/workflows/llvm-tests.yml b/.github/workflows/llvm-tests.yml index ea80e229512d5..c4701c7283da0 100644 --- a/.github/workflows/llvm-tests.yml +++ b/.github/workflows/llvm-tests.yml @@ -128,14 +128,14 @@ jobs: # Remove symbol versioning from dumps, so we can compare across major versions. sed -i 's/LLVM_${{ matrix.llvm_version_major }}/LLVM_NOVERSION/' ${{ matrix.ref }}.abi - name: Upload ABI file - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: ${{ matrix.name }} path: ${{ matrix.ref }}.abi - name: Upload symbol list file if: matrix.name == 'build-baseline' - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: symbol-list path: llvm.symbols @@ -179,7 +179,7 @@ jobs: abi-compliance-checker $EXTRA_ARGS -l libLLVM.so -old build-baseline/*.abi -new build-latest/*.abi || test "${{ needs.abi-dump-setup.outputs.ABI_HEADERS }}" = "llvm-c" - name: Upload ABI Comparison if: always() - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: compat-report-${{ github.sha }} path: compat_reports/ diff --git a/.github/workflows/mlir-spirv-tests.yml b/.github/workflows/mlir-spirv-tests.yml index 78952ccad2642..5bb16c739cdde 100644 --- a/.github/workflows/mlir-spirv-tests.yml +++ b/.github/workflows/mlir-spirv-tests.yml @@ -30,7 +30,7 @@ 
jobs: steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: 2G key: spirv-mlir-ubuntu-24.04 diff --git a/.github/workflows/pr-code-format.yml b/.github/workflows/pr-code-format.yml index 61c8680cd72a1..1e0dc7045c1cc 100644 --- a/.github/workflows/pr-code-format.yml +++ b/.github/workflows/pr-code-format.yml @@ -43,14 +43,14 @@ jobs: # of a release cycle (x.1.0) or the last version of a release cycle, or # if there have been relevant clang-format backports. - name: Install clang-format - uses: aminya/setup-cpp@17c11551771948abc5752bbf3183482567c7caf0 # v1.1.1 + uses: aminya/setup-cpp@a276e6e3d1db9160db5edc458e99a30d3b109949 # v1.7.1 with: clangformat: 21.1.0 - name: Setup Python env - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: '3.11' + python-version: '3.13' cache: 'pip' cache-dependency-path: 'llvm/utils/git/requirements_formatting.txt' @@ -72,7 +72,7 @@ jobs: --end-rev HEAD \ --changed-files "$CHANGED_FILES" - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: workflow-args diff --git a/.github/workflows/pr-code-lint.yml b/.github/workflows/pr-code-lint.yml index daefc9baacce7..776ec4af9d2dc 100644 --- a/.github/workflows/pr-code-lint.yml +++ b/.github/workflows/pr-code-lint.yml @@ -27,7 +27,7 @@ jobs: cancel-in-progress: true steps: - name: Fetch LLVM sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 with: fetch-depth: 2 @@ -51,14 +51,14 @@ jobs: # of a 
release cycle (x.1.0) or the last version of a release cycle, or # if there have been relevant clang-format backports. - name: Install clang-tidy - uses: aminya/setup-cpp@17c11551771948abc5752bbf3183482567c7caf0 # v1.1.1 + uses: aminya/setup-cpp@a276e6e3d1db9160db5edc458e99a30d3b109949 # v1.7.1 with: clang-tidy: 21.1.0 - name: Setup Python env - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: '3.12' + python-version: '3.13' - name: Install Python dependencies run: python3 -m pip install -r llvm/utils/git/requirements_linting.txt @@ -107,7 +107,7 @@ jobs: --changed-files "$CHANGED_FILES" - name: Upload results - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: workflow-args diff --git a/.github/workflows/pr-request-release-note.yml b/.github/workflows/pr-request-release-note.yml index f0197d71d6aa9..8162a8984ee5f 100644 --- a/.github/workflows/pr-request-release-note.yml +++ b/.github/workflows/pr-request-release-note.yml @@ -41,7 +41,7 @@ jobs: request-release-note \ --pr-number ${{ github.event.pull_request.number}} - - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: always() with: name: workflow-args diff --git a/.github/workflows/premerge.yaml b/.github/workflows/premerge.yaml index 63ab4a8356971..a9c107e4a5f08 100644 --- a/.github/workflows/premerge.yaml +++ b/.github/workflows/premerge.yaml @@ -76,7 +76,7 @@ jobs: # https://github.com/actions/upload-artifact/issues/569 continue-on-error: true if: '!cancelled()' - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: 
name: Premerge Artifacts (Linux) path: artifacts/ @@ -130,7 +130,7 @@ jobs: # https://github.com/actions/upload-artifact/issues/569 continue-on-error: true if: '!cancelled()' - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: Premerge Artifacts (Windows) path: artifacts/ @@ -151,7 +151,7 @@ jobs: with: fetch-depth: 2 - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: "2000M" - name: Install Ninja diff --git a/.github/workflows/release-asset-audit.yml b/.github/workflows/release-asset-audit.yml index 6546540a1b547..8b24948b568eb 100644 --- a/.github/workflows/release-asset-audit.yml +++ b/.github/workflows/release-asset-audit.yml @@ -38,7 +38,7 @@ jobs: if: >- github.event_name != 'pull_request' && failure() - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea #v7.0.1 + uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 with: github-token: ${{ secrets.ISSUE_SUBSCRIBER_TOKEN }} script: | diff --git a/.github/workflows/release-binaries-save-stage/action.yml b/.github/workflows/release-binaries-save-stage/action.yml index f08088c7bc56f..84ccf98c23a82 100644 --- a/.github/workflows/release-binaries-save-stage/action.yml +++ b/.github/workflows/release-binaries-save-stage/action.yml @@ -30,14 +30,14 @@ runs: tar -C ${{ inputs.build-prefix }} -c build/ | zstd -T0 -c > build.tar.zst - name: Upload Stage 1 Source - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: ${{ runner.os }}-${{ runner.arch }}-${{ github.job }}-source path: llvm-project.tar.zst retention-days: 2 - name: Upload Stage 1 Build Dir - uses: 
actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: ${{ runner.os}}-${{ runner.arch }}-${{ github.job }}-build path: build.tar.zst diff --git a/.github/workflows/release-binaries-setup-stage/action.yml b/.github/workflows/release-binaries-setup-stage/action.yml index 8f45e22886b6e..475a25fa6b772 100644 --- a/.github/workflows/release-binaries-setup-stage/action.yml +++ b/.github/workflows/release-binaries-setup-stage/action.yml @@ -22,7 +22,7 @@ runs: using: "composite" steps: - name: Install Ninja - uses: llvm/actions/install-ninja@22e9f909d35b50bd1181709564bfe816eaeaae81 # main + uses: llvm/actions/install-ninja@a1ea791b03c8e61f53a0e66f2f73db283aa0f01e # main - name: Setup Windows if: startsWith(runner.os, 'Windows') diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 8f422a0147748..cba48e4d0c70a 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -68,9 +68,9 @@ jobs: steps: # It's good practice to use setup-python, but this is also required on macos-14 # due to https://github.com/actions/runner-images/issues/10385 - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f + - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: '3.12' + python-version: '3.13' - name: Checkout LLVM uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -250,7 +250,7 @@ jobs: release_dir=`find ${{ steps.setup-stage.outputs.build-prefix }}/build -iname 'stage2-bins'` mv $release_dir/${{ needs.prepare.outputs.release-binary-filename }} . 
- - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: ${{ runner.os }}-${{ runner.arch }}-release-binary # Due to path differences on Windows when running in bash vs running on node, @@ -301,7 +301,7 @@ jobs: - name: Attest Build Provenance id: provenance - uses: actions/attest-build-provenance@897ed5eab6ed058a474202017ada7f40bfa52940 # v1.0.0 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: ${{ needs.prepare.outputs.release-binary-filename }} @@ -310,7 +310,7 @@ jobs: mv ${{ steps.provenance.outputs.bundle-path }} ${{ needs.prepare.outputs.release-binary-filename }}.jsonl - name: Upload Build Provenance - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 #v4.3.3 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: ${{ needs.prepare.outputs.release-binary-filename }}-attestation path: ${{ needs.prepare.outputs.release-binary-filename }}.jsonl diff --git a/.github/workflows/release-documentation.yml b/.github/workflows/release-documentation.yml index 712ff1831170e..d3d375d3a6df9 100644 --- a/.github/workflows/release-documentation.yml +++ b/.github/workflows/release-documentation.yml @@ -37,7 +37,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup Python env - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: cache: 'pip' cache-dependency-path: './llvm/docs/requirements.txt' @@ -59,7 +59,7 @@ jobs: ./llvm/utils/release/build-docs.sh -release "${{ inputs.release-version }}" -no-doxygen - name: Create Release Notes Artifact - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 + uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 with: name: release-notes path: docs-build/html-export/ diff --git a/.github/workflows/release-doxygen.yml b/.github/workflows/release-doxygen.yml index 17c677413f744..79e509e5e6a8b 100644 --- a/.github/workflows/release-doxygen.yml +++ b/.github/workflows/release-doxygen.yml @@ -43,7 +43,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup Python env - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: cache: 'pip' cache-dependency-path: './llvm/docs/requirements.txt' diff --git a/.github/workflows/release-lit.yml b/.github/workflows/release-lit.yml index 60ec64462bc31..8b1ce04e12c4f 100644 --- a/.github/workflows/release-lit.yml +++ b/.github/workflows/release-lit.yml @@ -45,7 +45,7 @@ jobs: ./llvm/utils/release/./github-upload-release.py --token "$GITHUB_TOKEN" --user ${{ github.actor }} --user-token "$USER_TOKEN" check-permissions - name: Setup Cpp - uses: aminya/setup-cpp@17c11551771948abc5752bbf3183482567c7caf0 # v1.1.1 + uses: aminya/setup-cpp@a276e6e3d1db9160db5edc458e99a30d3b109949 # v1.7.1 with: compiler: llvm-16.0.6 cmake: true @@ -66,14 +66,14 @@ jobs: python3 setup.py sdist bdist_wheel - name: Upload lit to test.pypi.org - uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4 + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 with: password: ${{ secrets.LLVM_LIT_TEST_PYPI_API_TOKEN }} repository-url: https://test.pypi.org/legacy/ packages-dir: llvm/utils/lit/dist/ - name: Upload lit to pypi.org - uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # v1.12.4 + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 with: password: ${{ secrets.LLVM_LIT_PYPI_API_TOKEN }} packages-dir: llvm/utils/lit/dist/ diff 
--git a/.github/workflows/release-sources.yml b/.github/workflows/release-sources.yml index 14cc4c4e9b94f..2278b96dbe242 100644 --- a/.github/workflows/release-sources.yml +++ b/.github/workflows/release-sources.yml @@ -92,14 +92,14 @@ jobs: - name: Attest Build Provenance if: github.event_name != 'pull_request' id: provenance - uses: actions/attest-build-provenance@897ed5eab6ed058a474202017ada7f40bfa52940 # v1.0.0 + uses: actions/attest-build-provenance@ef244123eb79f2f7a7e75d99086184180e6d0018 # v1.4.4 with: subject-path: "*.xz" - if: github.event_name != 'pull_request' run: | mv ${{ steps.provenance.outputs.bundle-path }} . - name: Create Tarball Artifacts - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 #v4.3.3 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: path: | *.xz diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 40db5504294ef..c07df338cf989 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -36,7 +36,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 with: results_file: results.sarif results_format: sarif @@ -49,7 +49,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: name: SARIF file path: results.sarif @@ -57,6 +57,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@80f993039571a6de66594ecaa432875a6942e8e0 # v2.20.6 + uses: github/codeql-action/upload-sarif@b8d3b6e8af63cde30bdc382c0bc28114f4346c88 # v2.28.1 with: sarif_file: results.sarif diff --git a/.github/workflows/spirv-tests.yml b/.github/workflows/spirv-tests.yml index 8708fb06d9eb8..69374ae563306 100644 --- a/.github/workflows/spirv-tests.yml +++ b/.github/workflows/spirv-tests.yml @@ -26,7 +26,7 @@ jobs: steps: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup ccache - uses: hendrikmuhs/ccache-action@a1209f81afb8c005c13b4296c32e363431bffea5 # v1.2.17 + uses: hendrikmuhs/ccache-action@bfa03e1de4d7f7c3e80ad9109feedd05c4f5a716 # v1.2.19 with: max-size: 2G key: spirv-ubuntu-24.04 diff --git a/.github/workflows/unprivileged-download-artifact/action.yml b/.github/workflows/unprivileged-download-artifact/action.yml index 9d8fb59a67c0e..5b50d7ce3d3fb 100644 --- a/.github/workflows/unprivileged-download-artifact/action.yml +++ b/.github/workflows/unprivileged-download-artifact/action.yml @@ -27,7 +27,7 @@ outputs: runs: using: "composite" steps: - - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea #v7.0.1 + - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0 id: artifact-url with: script: | diff --git a/bolt/lib/Core/GDBIndex.cpp b/bolt/lib/Core/GDBIndex.cpp index 7ccbd2e50db68..4c34f5ee7fca7 100644 --- a/bolt/lib/Core/GDBIndex.cpp +++ b/bolt/lib/Core/GDBIndex.cpp @@ -100,10 +100,19 @@ void GDBIndex::updateGdbIndexSection( Data += SymbolTableOffset - CUTypesOffset; // Calculate the size of the new address table. 
+ const auto IsValidAddressRange = [](const DebugAddressRange &Range) { + return Range.HighPC > Range.LowPC; + }; + uint32_t NewAddressTableSize = 0; for (const auto &CURangesPair : ARangesSectionWriter.getCUAddressRanges()) { const SmallVector &Ranges = CURangesPair.second; - NewAddressTableSize += Ranges.size() * 20; + NewAddressTableSize += + llvm::count_if(Ranges, + [&IsValidAddressRange](const DebugAddressRange &Range) { + return IsValidAddressRange(Range); + }) * + 20; } // Difference between old and new table (and section) sizes. @@ -201,10 +210,15 @@ void GDBIndex::updateGdbIndexSection( const uint32_t UpdatedCUIndex = RemapCUIndex(OriginalCUIndex); const DebugAddressRangesVector &Ranges = CURangesPair.second; for (const DebugAddressRange &Range : Ranges) { - write64le(Buffer, Range.LowPC); - write64le(Buffer + 8, Range.HighPC); - write32le(Buffer + 16, UpdatedCUIndex); - Buffer += 20; + // Don't emit ranges that break gdb, + // https://sourceware.org/bugzilla/show_bug.cgi?id=33247. + // We've seen [0, 0) ranges here, for instance. 
+ if (IsValidAddressRange(Range)) { + write64le(Buffer, Range.LowPC); + write64le(Buffer + 8, Range.HighPC); + write32le(Buffer + 16, UpdatedCUIndex); + Buffer += 20; + } } } diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp index 6752489ad562a..5c89a424caa7f 100644 --- a/bolt/lib/Rewrite/DWARFRewriter.cpp +++ b/bolt/lib/Rewrite/DWARFRewriter.cpp @@ -504,9 +504,7 @@ static void emitDWOBuilder(const std::string &DWOName, } emitUnit(DWODIEBuilder, *Streamer, SplitCU); } else { - for (std::unique_ptr &CU : - SplitCU.getContext().dwo_compile_units()) - emitUnit(DWODIEBuilder, *Streamer, *CU); + emitUnit(DWODIEBuilder, *Streamer, SplitCU); // emit debug_types sections for dwarf4 for (DWARFUnit *CU : DWODIEBuilder.getDWARF4TUVector()) diff --git a/bolt/lib/Target/AArch64/CMakeLists.txt b/bolt/lib/Target/AArch64/CMakeLists.txt index cb38117de659e..53554e75de15c 100644 --- a/bolt/lib/Target/AArch64/CMakeLists.txt +++ b/bolt/lib/Target/AArch64/CMakeLists.txt @@ -28,7 +28,7 @@ add_llvm_library(LLVMBOLTTargetAArch64 AArch64CommonTableGen ) -target_link_libraries(LLVMBOLTTargetAArch64 PRIVATE LLVMBOLTCore) +target_link_libraries(LLVMBOLTTargetAArch64 PRIVATE LLVMBOLTCore LLVMBOLTUtils) include_directories( ${LLVM_MAIN_SRC_DIR}/lib/Target/AArch64 diff --git a/bolt/test/AArch64/dwarf4-dwp-aarch64.s b/bolt/test/AArch64/dwarf4-dwp-aarch64.s new file mode 100755 index 0000000000000..37507e100a62d --- /dev/null +++ b/bolt/test/AArch64/dwarf4-dwp-aarch64.s @@ -0,0 +1,407 @@ +## This test checks updating debuginfo via dwarf4 dwp file +# RUN: rm -rf %t && mkdir -p %t && cd %t +# RUN: split-file %s %t +# RUN: llvm-mc -filetype=obj -triple aarch64-unknown-unknown --split-dwarf-file=main.exe-main.dwo %t/main.s -o %t/main.o +# RUN: llvm-mc -filetype=obj -triple aarch64-unknown-unknown --split-dwarf-file=main.exe-callee.dwo %t/callee.s -o %t/callee.o +# RUN: %clangxx %cxxflags -gdwarf-4 -gsplit-dwarf=split -Wl,-e,main %t/main.o %t/callee.o -o main.exe +# 
RUN: llvm-dwp -e %t/main.exe -o %t/main.exe.dwp +# RUN: llvm-bolt %t/main.exe -o %t/main.exe.bolt -update-debug-sections 2>&1 | FileCheck %s + +# CHECK-NOT: Assertion + +#--- main.s + .file "main.cpp" + .globl main // -- Begin function main + .type main,@function +main: // @main +.Lfunc_begin0: + .file 1 "." "main.cpp" + .loc 1 2 0 // main.cpp:2:0 + .loc 1 2 21 prologue_end // main.cpp:2:21 + .loc 1 2 14 epilogue_begin is_stmt 0 // main.cpp:2:14 + ret +.Lfunc_end0: + .size main, .Lfunc_end0-main + .section .debug_abbrev,"",@progbits + .byte 1 // Abbreviation Code + .byte 17 // DW_TAG_compile_unit + .byte 0 // DW_CHILDREN_no + .byte 16 // DW_AT_stmt_list + .byte 23 // DW_FORM_sec_offset + .byte 27 // DW_AT_comp_dir + .byte 14 // DW_FORM_strp + .ascii "\264B" // DW_AT_GNU_pubnames + .byte 25 // DW_FORM_flag_present + .ascii "\260B" // DW_AT_GNU_dwo_name + .byte 14 // DW_FORM_strp + .ascii "\261B" // DW_AT_GNU_dwo_id + .byte 7 // DW_FORM_data8 + .byte 17 // DW_AT_low_pc + .byte 1 // DW_FORM_addr + .byte 18 // DW_AT_high_pc + .byte 6 // DW_FORM_data4 + .ascii "\263B" // DW_AT_GNU_addr_base + .byte 23 // DW_FORM_sec_offset + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 0 // EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .word .Ldebug_info_end0-.Ldebug_info_start0 // Length of Unit +.Ldebug_info_start0: + .hword 4 // DWARF version number + .word .debug_abbrev // Offset Into Abbrev. Section + .byte 8 // Address Size (in bytes) + .byte 1 // Abbrev [1] 0xb:0x25 DW_TAG_compile_unit + .word .Lline_table_start0 // DW_AT_stmt_list + .word .Lskel_string0 // DW_AT_comp_dir + // DW_AT_GNU_pubnames + .word .Lskel_string1 // DW_AT_GNU_dwo_name + .xword 1465063543908291764 // DW_AT_GNU_dwo_id + .xword .Lfunc_begin0 // DW_AT_low_pc + .word .Lfunc_end0-.Lfunc_begin0 // DW_AT_high_pc + .word .Laddr_table_base0 // DW_AT_GNU_addr_base +.Ldebug_info_end0: + .section .debug_str,"MS",@progbits,1 +.Lskel_string0: + .asciz "." 
// string offset=0 +.Lskel_string1: + .asciz "main.exe-main.dwo" // string offset=2 + .section .debug_str.dwo,"eMS",@progbits,1 +.Linfo_string0: + .asciz "main" // string offset=0 +.Linfo_string1: + .asciz "int" // string offset=5 +.Linfo_string2: + .byte 0 // string offset=9 +.Linfo_string3: + .asciz "main.cpp" // string offset=10 +.Linfo_string4: + .asciz "main.exe-main.dwo" // string offset=19 + .section .debug_str_offsets.dwo,"e",@progbits + .word 0 + .word 5 + .word 9 + .word 10 + .word 19 + .section .debug_info.dwo,"e",@progbits + .word .Ldebug_info_dwo_end0-.Ldebug_info_dwo_start0 // Length of Unit +.Ldebug_info_dwo_start0: + .hword 4 // DWARF version number + .word 0 // Offset Into Abbrev. Section + .byte 8 // Address Size (in bytes) + .byte 1 // Abbrev [1] 0xb:0x22 DW_TAG_compile_unit + .byte 2 // DW_AT_producer + .hword 33 // DW_AT_language + .byte 3 // DW_AT_name + .byte 4 // DW_AT_GNU_dwo_name + .xword 1465063543908291764 // DW_AT_GNU_dwo_id + .byte 2 // Abbrev [2] 0x19:0xf DW_TAG_subprogram + .byte 0 // DW_AT_low_pc + .word .Lfunc_end0-.Lfunc_begin0 // DW_AT_high_pc + .byte 1 // DW_AT_frame_base + .byte 109 + .byte 0 // DW_AT_name + .byte 1 // DW_AT_decl_file + .byte 2 // DW_AT_decl_line + .word 40 // DW_AT_type + // DW_AT_external + .byte 3 // Abbrev [3] 0x28:0x4 DW_TAG_base_type + .byte 1 // DW_AT_name + .byte 5 // DW_AT_encoding + .byte 4 // DW_AT_byte_size + .byte 0 // End Of Children Mark +.Ldebug_info_dwo_end0: + .section .debug_abbrev.dwo,"e",@progbits + .byte 1 // Abbreviation Code + .byte 17 // DW_TAG_compile_unit + .byte 1 // DW_CHILDREN_yes + .byte 37 // DW_AT_producer + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 19 // DW_AT_language + .byte 5 // DW_FORM_data2 + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .ascii "\260B" // DW_AT_GNU_dwo_name + .ascii "\202>" // DW_FORM_GNU_str_index + .ascii "\261B" // DW_AT_GNU_dwo_id + .byte 7 // DW_FORM_data8 + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 2 // Abbreviation Code 
+ .byte 46 // DW_TAG_subprogram + .byte 0 // DW_CHILDREN_no + .byte 17 // DW_AT_low_pc + .ascii "\201>" // DW_FORM_GNU_addr_index + .byte 18 // DW_AT_high_pc + .byte 6 // DW_FORM_data4 + .byte 64 // DW_AT_frame_base + .byte 24 // DW_FORM_exprloc + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 58 // DW_AT_decl_file + .byte 11 // DW_FORM_data1 + .byte 59 // DW_AT_decl_line + .byte 11 // DW_FORM_data1 + .byte 73 // DW_AT_type + .byte 19 // DW_FORM_ref4 + .byte 63 // DW_AT_external + .byte 25 // DW_FORM_flag_present + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 3 // Abbreviation Code + .byte 36 // DW_TAG_base_type + .byte 0 // DW_CHILDREN_no + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 62 // DW_AT_encoding + .byte 11 // DW_FORM_data1 + .byte 11 // DW_AT_byte_size + .byte 11 // DW_FORM_data1 + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 0 // EOM(3) + .section .debug_addr,"",@progbits +.Laddr_table_base0: + .xword .Lfunc_begin0 + .section .debug_gnu_pubnames,"",@progbits + .word .LpubNames_end0-.LpubNames_start0 // Length of Public Names Info +.LpubNames_start0: + .hword 2 // DWARF Version + .word .Lcu_begin0 // Offset of Compilation Unit Info + .word 48 // Compilation Unit Length + .word 25 // DIE offset + .byte 48 // Attributes: FUNCTION, EXTERNAL + .asciz "main" // External Name + .word 0 // End Mark +.LpubNames_end0: + .section .debug_gnu_pubtypes,"",@progbits + .word .LpubTypes_end0-.LpubTypes_start0 // Length of Public Types Info +.LpubTypes_start0: + .hword 2 // DWARF Version + .word .Lcu_begin0 // Offset of Compilation Unit Info + .word 48 // Compilation Unit Length + .word 40 // DIE offset + .byte 144 // Attributes: TYPE, STATIC + .asciz "int" // External Name + .word 0 // End Mark +.LpubTypes_end0: + .section ".note.GNU-stack","",@progbits + .addrsig + .addrsig_sym _Z6calleei + .section .debug_line,"",@progbits +.Lline_table_start0: +#--- callee.s + .file "callee.cpp" + .globl _Z6calleei // -- Begin 
function _Z6calleei + .type _Z6calleei,@function +_Z6calleei: // @_Z6calleei +.Lfunc_begin0: + .file 1 "." "callee.cpp" + .loc 1 1 0 // callee.cpp:1:0 + .loc 1 1 28 prologue_end // callee.cpp:1:28 + .loc 1 1 21 epilogue_begin is_stmt 0 // callee.cpp:1:21 + ret +.Lfunc_end0: + .size _Z6calleei, .Lfunc_end0-_Z6calleei + .section .debug_abbrev,"",@progbits + .byte 1 // Abbreviation Code + .byte 17 // DW_TAG_compile_unit + .byte 0 // DW_CHILDREN_no + .byte 16 // DW_AT_stmt_list + .byte 23 // DW_FORM_sec_offset + .byte 27 // DW_AT_comp_dir + .byte 14 // DW_FORM_strp + .ascii "\264B" // DW_AT_GNU_pubnames + .byte 25 // DW_FORM_flag_present + .ascii "\260B" // DW_AT_GNU_dwo_name + .byte 14 // DW_FORM_strp + .ascii "\261B" // DW_AT_GNU_dwo_id + .byte 7 // DW_FORM_data8 + .byte 17 // DW_AT_low_pc + .byte 1 // DW_FORM_addr + .byte 18 // DW_AT_high_pc + .byte 6 // DW_FORM_data4 + .ascii "\263B" // DW_AT_GNU_addr_base + .byte 23 // DW_FORM_sec_offset + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 0 // EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .word .Ldebug_info_end0-.Ldebug_info_start0 // Length of Unit +.Ldebug_info_start0: + .hword 4 // DWARF version number + .word .debug_abbrev // Offset Into Abbrev. Section + .byte 8 // Address Size (in bytes) + .byte 1 // Abbrev [1] 0xb:0x25 DW_TAG_compile_unit + .word .Lline_table_start0 // DW_AT_stmt_list + .word .Lskel_string0 // DW_AT_comp_dir + // DW_AT_GNU_pubnames + .word .Lskel_string1 // DW_AT_GNU_dwo_name + .xword 7650227797527095061 // DW_AT_GNU_dwo_id + .xword .Lfunc_begin0 // DW_AT_low_pc + .word .Lfunc_end0-.Lfunc_begin0 // DW_AT_high_pc + .word .Laddr_table_base0 // DW_AT_GNU_addr_base +.Ldebug_info_end0: + .section .debug_str,"MS",@progbits,1 +.Lskel_string0: + .asciz "." 
// string offset=0 +.Lskel_string1: + .asciz "main.exe-callee.dwo" // string offset=2 + .section .debug_str.dwo,"eMS",@progbits,1 +.Linfo_string0: + .asciz "_Z6calleei" // string offset=0 +.Linfo_string1: + .asciz "callee" // string offset=11 +.Linfo_string2: + .asciz "int" // string offset=18 +.Linfo_string3: + .asciz "x" // string offset=22 +.Linfo_string4: + .byte 0 // string offset=24 +.Linfo_string5: + .asciz "callee.cpp" // string offset=25 +.Linfo_string6: + .asciz "main.exe-callee.dwo" // string offset=36 + .section .debug_str_offsets.dwo,"e",@progbits + .word 0 + .word 11 + .word 18 + .word 22 + .word 24 + .word 25 + .word 36 + .section .debug_info.dwo,"e",@progbits + .word .Ldebug_info_dwo_end0-.Ldebug_info_dwo_start0 // Length of Unit +.Ldebug_info_dwo_start0: + .hword 4 // DWARF version number + .word 0 // Offset Into Abbrev. Section + .byte 8 // Address Size (in bytes) + .byte 1 // Abbrev [1] 0xb:0x2f DW_TAG_compile_unit + .byte 4 // DW_AT_producer + .hword 33 // DW_AT_language + .byte 5 // DW_AT_name + .byte 6 // DW_AT_GNU_dwo_name + .xword 7650227797527095061 // DW_AT_GNU_dwo_id + .byte 2 // Abbrev [2] 0x19:0x1c DW_TAG_subprogram + .byte 0 // DW_AT_low_pc + .word .Lfunc_end0-.Lfunc_begin0 // DW_AT_high_pc + .byte 1 // DW_AT_frame_base + .byte 111 + .byte 0 // DW_AT_linkage_name + .byte 1 // DW_AT_name + .byte 1 // DW_AT_decl_file + .byte 1 // DW_AT_decl_line + .word 53 // DW_AT_type + // DW_AT_external + .byte 3 // Abbrev [3] 0x29:0xb DW_TAG_formal_parameter + .byte 2 // DW_AT_location + .byte 145 + .byte 12 + .byte 3 // DW_AT_name + .byte 1 // DW_AT_decl_file + .byte 1 // DW_AT_decl_line + .word 53 // DW_AT_type + .byte 0 // End Of Children Mark + .byte 4 // Abbrev [4] 0x35:0x4 DW_TAG_base_type + .byte 2 // DW_AT_name + .byte 5 // DW_AT_encoding + .byte 4 // DW_AT_byte_size + .byte 0 // End Of Children Mark +.Ldebug_info_dwo_end0: + .section .debug_abbrev.dwo,"e",@progbits + .byte 1 // Abbreviation Code + .byte 17 // DW_TAG_compile_unit + .byte 1 // 
DW_CHILDREN_yes + .byte 37 // DW_AT_producer + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 19 // DW_AT_language + .byte 5 // DW_FORM_data2 + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .ascii "\260B" // DW_AT_GNU_dwo_name + .ascii "\202>" // DW_FORM_GNU_str_index + .ascii "\261B" // DW_AT_GNU_dwo_id + .byte 7 // DW_FORM_data8 + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 2 // Abbreviation Code + .byte 46 // DW_TAG_subprogram + .byte 1 // DW_CHILDREN_yes + .byte 17 // DW_AT_low_pc + .ascii "\201>" // DW_FORM_GNU_addr_index + .byte 18 // DW_AT_high_pc + .byte 6 // DW_FORM_data4 + .byte 64 // DW_AT_frame_base + .byte 24 // DW_FORM_exprloc + .byte 110 // DW_AT_linkage_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 58 // DW_AT_decl_file + .byte 11 // DW_FORM_data1 + .byte 59 // DW_AT_decl_line + .byte 11 // DW_FORM_data1 + .byte 73 // DW_AT_type + .byte 19 // DW_FORM_ref4 + .byte 63 // DW_AT_external + .byte 25 // DW_FORM_flag_present + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 3 // Abbreviation Code + .byte 5 // DW_TAG_formal_parameter + .byte 0 // DW_CHILDREN_no + .byte 2 // DW_AT_location + .byte 24 // DW_FORM_exprloc + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 58 // DW_AT_decl_file + .byte 11 // DW_FORM_data1 + .byte 59 // DW_AT_decl_line + .byte 11 // DW_FORM_data1 + .byte 73 // DW_AT_type + .byte 19 // DW_FORM_ref4 + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 4 // Abbreviation Code + .byte 36 // DW_TAG_base_type + .byte 0 // DW_CHILDREN_no + .byte 3 // DW_AT_name + .ascii "\202>" // DW_FORM_GNU_str_index + .byte 62 // DW_AT_encoding + .byte 11 // DW_FORM_data1 + .byte 11 // DW_AT_byte_size + .byte 11 // DW_FORM_data1 + .byte 0 // EOM(1) + .byte 0 // EOM(2) + .byte 0 // EOM(3) + .section .debug_addr,"",@progbits +.Laddr_table_base0: + .xword .Lfunc_begin0 + .section .debug_gnu_pubnames,"",@progbits + .word 
.LpubNames_end0-.LpubNames_start0 // Length of Public Names Info +.LpubNames_start0: + .hword 2 // DWARF Version + .word .Lcu_begin0 // Offset of Compilation Unit Info + .word 48 // Compilation Unit Length + .word 25 // DIE offset + .byte 48 // Attributes: FUNCTION, EXTERNAL + .asciz "callee" // External Name + .word 0 // End Mark +.LpubNames_end0: + .section .debug_gnu_pubtypes,"",@progbits + .word .LpubTypes_end0-.LpubTypes_start0 // Length of Public Types Info +.LpubTypes_start0: + .hword 2 // DWARF Version + .word .Lcu_begin0 // Offset of Compilation Unit Info + .word 48 // Compilation Unit Length + .word 53 // DIE offset + .byte 144 // Attributes: TYPE, STATIC + .asciz "int" // External Name + .word 0 // End Mark +.LpubTypes_end0: + .section ".note.GNU-stack","",@progbits + .addrsig + .section .debug_line,"",@progbits +.Lline_table_start0: diff --git a/bolt/test/X86/dwarf4-dwp-x86.s b/bolt/test/X86/dwarf4-dwp-x86.s new file mode 100755 index 0000000000000..6dde1678f3840 --- /dev/null +++ b/bolt/test/X86/dwarf4-dwp-x86.s @@ -0,0 +1,405 @@ +## This test checks updating debuginfo via dwarf4 dwp file +# RUN: rm -rf %t && mkdir -p %t && cd %t +# RUN: split-file %s %t +# RUN: %clangxx %cxxflags -g -gdwarf-4 -gsplit-dwarf %t/main.s %t/callee.s -o main.exe +# RUN: llvm-dwp -e %t/main.exe -o %t/main.exe.dwp +# RUN: llvm-bolt %t/main.exe -o %t/main.exe.bolt -update-debug-sections 2>&1 | FileCheck %s + +# CHECK-NOT: Assertion + +#--- main.s + .file "main.cpp" + .globl main # -- Begin function main + .type main,@function +main: # @main +.Lfunc_begin0: + .file 1 "." 
"main.cpp" + .loc 1 2 0 # main.cpp:2:0 + .loc 1 2 21 prologue_end # main.cpp:2:21 + .loc 1 2 14 epilogue_begin is_stmt 0 # main.cpp:2:14 + retq +.Lfunc_end0: + .size main, .Lfunc_end0-main + .section .debug_abbrev,"",@progbits + .byte 1 # Abbreviation Code + .byte 17 # DW_TAG_compile_unit + .byte 0 # DW_CHILDREN_no + .byte 16 # DW_AT_stmt_list + .byte 23 # DW_FORM_sec_offset + .byte 27 # DW_AT_comp_dir + .byte 14 # DW_FORM_strp + .ascii "\264B" # DW_AT_GNU_pubnames + .byte 25 # DW_FORM_flag_present + .ascii "\260B" # DW_AT_GNU_dwo_name + .byte 14 # DW_FORM_strp + .ascii "\261B" # DW_AT_GNU_dwo_id + .byte 7 # DW_FORM_data8 + .byte 17 # DW_AT_low_pc + .byte 1 # DW_FORM_addr + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .ascii "\263B" # DW_AT_GNU_addr_base + .byte 23 # DW_FORM_sec_offset + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit +.Ldebug_info_start0: + .short 4 # DWARF version number + .long .debug_abbrev # Offset Into Abbrev. Section + .byte 8 # Address Size (in bytes) + .byte 1 # Abbrev [1] 0xb:0x25 DW_TAG_compile_unit + .long .Lline_table_start0 # DW_AT_stmt_list + .long .Lskel_string0 # DW_AT_comp_dir + # DW_AT_GNU_pubnames + .long .Lskel_string1 # DW_AT_GNU_dwo_name + .quad 1465063543908291764 # DW_AT_GNU_dwo_id + .quad .Lfunc_begin0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .long .Laddr_table_base0 # DW_AT_GNU_addr_base +.Ldebug_info_end0: + .section .debug_str,"MS",@progbits,1 +.Lskel_string0: + .asciz "." 
# string offset=0 +.Lskel_string1: + .asciz "main.exe-main.dwo" # string offset=2 + .section .debug_str.dwo,"eMS",@progbits,1 +.Linfo_string0: + .asciz "main" # string offset=0 +.Linfo_string1: + .asciz "int" # string offset=5 +.Linfo_string2: + .byte 0 # string offset=9 +.Linfo_string3: + .asciz "main.cpp" # string offset=10 +.Linfo_string4: + .asciz "main.exe-main.dwo" # string offset=19 + .section .debug_str_offsets.dwo,"e",@progbits + .long 0 + .long 5 + .long 9 + .long 10 + .long 19 + .section .debug_info.dwo,"e",@progbits + .long .Ldebug_info_dwo_end0-.Ldebug_info_dwo_start0 # Length of Unit +.Ldebug_info_dwo_start0: + .short 4 # DWARF version number + .long 0 # Offset Into Abbrev. Section + .byte 8 # Address Size (in bytes) + .byte 1 # Abbrev [1] 0xb:0x22 DW_TAG_compile_unit + .byte 2 # DW_AT_producer + .short 33 # DW_AT_language + .byte 3 # DW_AT_name + .byte 4 # DW_AT_GNU_dwo_name + .quad 1465063543908291764 # DW_AT_GNU_dwo_id + .byte 2 # Abbrev [2] 0x19:0xf DW_TAG_subprogram + .byte 0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .byte 1 # DW_AT_frame_base + .byte 86 + .byte 0 # DW_AT_name + .byte 1 # DW_AT_decl_file + .byte 2 # DW_AT_decl_line + .long 40 # DW_AT_type + # DW_AT_external + .byte 3 # Abbrev [3] 0x28:0x4 DW_TAG_base_type + .byte 1 # DW_AT_name + .byte 5 # DW_AT_encoding + .byte 4 # DW_AT_byte_size + .byte 0 # End Of Children Mark +.Ldebug_info_dwo_end0: + .section .debug_abbrev.dwo,"e",@progbits + .byte 1 # Abbreviation Code + .byte 17 # DW_TAG_compile_unit + .byte 1 # DW_CHILDREN_yes + .byte 37 # DW_AT_producer + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 19 # DW_AT_language + .byte 5 # DW_FORM_data2 + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .ascii "\260B" # DW_AT_GNU_dwo_name + .ascii "\202>" # DW_FORM_GNU_str_index + .ascii "\261B" # DW_AT_GNU_dwo_id + .byte 7 # DW_FORM_data8 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 2 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 0 # 
DW_CHILDREN_no + .byte 17 # DW_AT_low_pc + .ascii "\201>" # DW_FORM_GNU_addr_index + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 64 # DW_AT_frame_base + .byte 24 # DW_FORM_exprloc + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 3 # Abbreviation Code + .byte 36 # DW_TAG_base_type + .byte 0 # DW_CHILDREN_no + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 62 # DW_AT_encoding + .byte 11 # DW_FORM_data1 + .byte 11 # DW_AT_byte_size + .byte 11 # DW_FORM_data1 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_addr,"",@progbits +.Laddr_table_base0: + .quad .Lfunc_begin0 + .section .debug_gnu_pubnames,"",@progbits + .long .LpubNames_end0-.LpubNames_start0 # Length of Public Names Info +.LpubNames_start0: + .short 2 # DWARF Version + .long .Lcu_begin0 # Offset of Compilation Unit Info + .long 48 # Compilation Unit Length + .long 25 # DIE offset + .byte 48 # Attributes: FUNCTION, EXTERNAL + .asciz "main" # External Name + .long 0 # End Mark +.LpubNames_end0: + .section .debug_gnu_pubtypes,"",@progbits + .long .LpubTypes_end0-.LpubTypes_start0 # Length of Public Types Info +.LpubTypes_start0: + .short 2 # DWARF Version + .long .Lcu_begin0 # Offset of Compilation Unit Info + .long 48 # Compilation Unit Length + .long 40 # DIE offset + .byte 144 # Attributes: TYPE, STATIC + .asciz "int" # External Name + .long 0 # End Mark +.LpubTypes_end0: + .section ".note.GNU-stack","",@progbits + .addrsig + .addrsig_sym _Z6calleei + .section .debug_line,"",@progbits +.Lline_table_start0: +#--- callee.s + .file "callee.cpp" + .globl _Z6calleei # -- Begin function _Z6calleei + .type _Z6calleei,@function +_Z6calleei: # @_Z6calleei +.Lfunc_begin0: + 
.file 1 "." "callee.cpp" + .loc 1 1 0 # callee.cpp:1:0 + .loc 1 1 28 prologue_end # callee.cpp:1:28 + .loc 1 1 21 epilogue_begin is_stmt 0 # callee.cpp:1:21 + retq +.Lfunc_end0: + .size _Z6calleei, .Lfunc_end0-_Z6calleei + .section .debug_abbrev,"",@progbits + .byte 1 # Abbreviation Code + .byte 17 # DW_TAG_compile_unit + .byte 0 # DW_CHILDREN_no + .byte 16 # DW_AT_stmt_list + .byte 23 # DW_FORM_sec_offset + .byte 27 # DW_AT_comp_dir + .byte 14 # DW_FORM_strp + .ascii "\264B" # DW_AT_GNU_pubnames + .byte 25 # DW_FORM_flag_present + .ascii "\260B" # DW_AT_GNU_dwo_name + .byte 14 # DW_FORM_strp + .ascii "\261B" # DW_AT_GNU_dwo_id + .byte 7 # DW_FORM_data8 + .byte 17 # DW_AT_low_pc + .byte 1 # DW_FORM_addr + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .ascii "\263B" # DW_AT_GNU_addr_base + .byte 23 # DW_FORM_sec_offset + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_info,"",@progbits +.Lcu_begin0: + .long .Ldebug_info_end0-.Ldebug_info_start0 # Length of Unit +.Ldebug_info_start0: + .short 4 # DWARF version number + .long .debug_abbrev # Offset Into Abbrev. Section + .byte 8 # Address Size (in bytes) + .byte 1 # Abbrev [1] 0xb:0x25 DW_TAG_compile_unit + .long .Lline_table_start0 # DW_AT_stmt_list + .long .Lskel_string0 # DW_AT_comp_dir + # DW_AT_GNU_pubnames + .long .Lskel_string1 # DW_AT_GNU_dwo_name + .quad -8413212350243343807 # DW_AT_GNU_dwo_id + .quad .Lfunc_begin0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .long .Laddr_table_base0 # DW_AT_GNU_addr_base +.Ldebug_info_end0: + .section .debug_str,"MS",@progbits,1 +.Lskel_string0: + .asciz "." 
# string offset=0 +.Lskel_string1: + .asciz "main.exe-callee.dwo" # string offset=2 + .section .debug_str.dwo,"eMS",@progbits,1 +.Linfo_string0: + .asciz "_Z6calleei" # string offset=0 +.Linfo_string1: + .asciz "callee" # string offset=11 +.Linfo_string2: + .asciz "int" # string offset=18 +.Linfo_string3: + .asciz "x" # string offset=22 +.Linfo_string4: + .byte 0 # string offset=24 +.Linfo_string5: + .asciz "callee.cpp" # string offset=25 +.Linfo_string6: + .asciz "main.exe-callee.dwo" # string offset=36 + .section .debug_str_offsets.dwo,"e",@progbits + .long 0 + .long 11 + .long 18 + .long 22 + .long 24 + .long 25 + .long 36 + .section .debug_info.dwo,"e",@progbits + .long .Ldebug_info_dwo_end0-.Ldebug_info_dwo_start0 # Length of Unit +.Ldebug_info_dwo_start0: + .short 4 # DWARF version number + .long 0 # Offset Into Abbrev. Section + .byte 8 # Address Size (in bytes) + .byte 1 # Abbrev [1] 0xb:0x2f DW_TAG_compile_unit + .byte 4 # DW_AT_producer + .short 33 # DW_AT_language + .byte 5 # DW_AT_name + .byte 6 # DW_AT_GNU_dwo_name + .quad -8413212350243343807 # DW_AT_GNU_dwo_id + .byte 2 # Abbrev [2] 0x19:0x1c DW_TAG_subprogram + .byte 0 # DW_AT_low_pc + .long .Lfunc_end0-.Lfunc_begin0 # DW_AT_high_pc + .byte 1 # DW_AT_frame_base + .byte 86 + .byte 0 # DW_AT_linkage_name + .byte 1 # DW_AT_name + .byte 1 # DW_AT_decl_file + .byte 1 # DW_AT_decl_line + .long 53 # DW_AT_type + # DW_AT_external + .byte 3 # Abbrev [3] 0x29:0xb DW_TAG_formal_parameter + .byte 2 # DW_AT_location + .byte 145 + .byte 124 + .byte 3 # DW_AT_name + .byte 1 # DW_AT_decl_file + .byte 1 # DW_AT_decl_line + .long 53 # DW_AT_type + .byte 0 # End Of Children Mark + .byte 4 # Abbrev [4] 0x35:0x4 DW_TAG_base_type + .byte 2 # DW_AT_name + .byte 5 # DW_AT_encoding + .byte 4 # DW_AT_byte_size + .byte 0 # End Of Children Mark +.Ldebug_info_dwo_end0: + .section .debug_abbrev.dwo,"e",@progbits + .byte 1 # Abbreviation Code + .byte 17 # DW_TAG_compile_unit + .byte 1 # DW_CHILDREN_yes + .byte 37 # DW_AT_producer 
+ .ascii "\202>" # DW_FORM_GNU_str_index + .byte 19 # DW_AT_language + .byte 5 # DW_FORM_data2 + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .ascii "\260B" # DW_AT_GNU_dwo_name + .ascii "\202>" # DW_FORM_GNU_str_index + .ascii "\261B" # DW_AT_GNU_dwo_id + .byte 7 # DW_FORM_data8 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 2 # Abbreviation Code + .byte 46 # DW_TAG_subprogram + .byte 1 # DW_CHILDREN_yes + .byte 17 # DW_AT_low_pc + .ascii "\201>" # DW_FORM_GNU_addr_index + .byte 18 # DW_AT_high_pc + .byte 6 # DW_FORM_data4 + .byte 64 # DW_AT_frame_base + .byte 24 # DW_FORM_exprloc + .byte 110 # DW_AT_linkage_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 63 # DW_AT_external + .byte 25 # DW_FORM_flag_present + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 3 # Abbreviation Code + .byte 5 # DW_TAG_formal_parameter + .byte 0 # DW_CHILDREN_no + .byte 2 # DW_AT_location + .byte 24 # DW_FORM_exprloc + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 58 # DW_AT_decl_file + .byte 11 # DW_FORM_data1 + .byte 59 # DW_AT_decl_line + .byte 11 # DW_FORM_data1 + .byte 73 # DW_AT_type + .byte 19 # DW_FORM_ref4 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 4 # Abbreviation Code + .byte 36 # DW_TAG_base_type + .byte 0 # DW_CHILDREN_no + .byte 3 # DW_AT_name + .ascii "\202>" # DW_FORM_GNU_str_index + .byte 62 # DW_AT_encoding + .byte 11 # DW_FORM_data1 + .byte 11 # DW_AT_byte_size + .byte 11 # DW_FORM_data1 + .byte 0 # EOM(1) + .byte 0 # EOM(2) + .byte 0 # EOM(3) + .section .debug_addr,"",@progbits +.Laddr_table_base0: + .quad .Lfunc_begin0 + .section .debug_gnu_pubnames,"",@progbits + .long .LpubNames_end0-.LpubNames_start0 # Length of Public Names Info +.LpubNames_start0: + .short 2 # DWARF Version + .long 
.Lcu_begin0 # Offset of Compilation Unit Info + .long 48 # Compilation Unit Length + .long 25 # DIE offset + .byte 48 # Attributes: FUNCTION, EXTERNAL + .asciz "callee" # External Name + .long 0 # End Mark +.LpubNames_end0: + .section .debug_gnu_pubtypes,"",@progbits + .long .LpubTypes_end0-.LpubTypes_start0 # Length of Public Types Info +.LpubTypes_start0: + .short 2 # DWARF Version + .long .Lcu_begin0 # Offset of Compilation Unit Info + .long 48 # Compilation Unit Length + .long 53 # DIE offset + .byte 144 # Attributes: TYPE, STATIC + .asciz "int" # External Name + .long 0 # End Mark +.LpubTypes_end0: + .section ".note.GNU-stack","",@progbits + .addrsig + .section .debug_line,"",@progbits +.Lline_table_start0: diff --git a/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py b/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py index 471dbf8c110b4..49a1b14932644 100755 --- a/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py +++ b/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py @@ -26,7 +26,7 @@ import json import multiprocessing import os -import Queue +from queue import Queue import shutil import subprocess import sys @@ -105,7 +105,7 @@ def main(): try: # Spin up a bunch of tidy-launching threads. 
- queue = Queue.Queue(max_task) + queue = Queue(max_task) for _ in range(max_task): t = threading.Thread( target=run_find_all_symbols, args=(args, tmpdir, build_path, queue) diff --git a/clang-tools-extra/clang-tidy/bugprone/ThrowKeywordMissingCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/ThrowKeywordMissingCheck.cpp index cafb4a3e5f0e5..9781f0a5ac9de 100644 --- a/clang-tools-extra/clang-tidy/bugprone/ThrowKeywordMissingCheck.cpp +++ b/clang-tools-extra/clang-tidy/bugprone/ThrowKeywordMissingCheck.cpp @@ -20,7 +20,8 @@ void ThrowKeywordMissingCheck::registerMatchers(MatchFinder *Finder) { hasType(cxxRecordDecl(anyOf( matchesName("[Ee]xception|EXCEPTION"), hasAnyBase(hasType(hasCanonicalType(recordType(hasDeclaration( - cxxRecordDecl(matchesName("[Ee]xception|EXCEPTION")))))))))), + cxxRecordDecl(matchesName("[Ee]xception|EXCEPTION")) + .bind("base"))))))))), unless(anyOf( hasAncestor( stmt(anyOf(cxxThrowExpr(), callExpr(), returnStmt()))), @@ -39,6 +40,11 @@ void ThrowKeywordMissingCheck::check(const MatchFinder::MatchResult &Result) { diag(TemporaryExpr->getBeginLoc(), "suspicious exception object created but " "not thrown; did you mean 'throw %0'?") << TemporaryExpr->getType().getBaseTypeIdentifier()->getName(); + + if (const auto *BaseDecl = Result.Nodes.getNodeAs("base")) + diag(BaseDecl->getLocation(), + "object type inherits from base class declared here", + DiagnosticIDs::Note); } } // namespace clang::tidy::bugprone diff --git a/clang-tools-extra/clang-tidy/modernize/UseNullptrCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseNullptrCheck.cpp index 4dc4baecddd50..4084d713665ea 100644 --- a/clang-tools-extra/clang-tidy/modernize/UseNullptrCheck.cpp +++ b/clang-tools-extra/clang-tidy/modernize/UseNullptrCheck.cpp @@ -53,7 +53,7 @@ StatementMatcher makeCastSequenceMatcher(llvm::ArrayRef NameList) { unless(hasImplicitDestinationType( qualType(matchers::matchesAnyListedTypeName(NameList))))); - auto IsOrHasDescendant = [](auto InnerMatcher) { + auto 
IsOrHasDescendant = [](const auto &InnerMatcher) { return anyOf(InnerMatcher, hasDescendant(InnerMatcher)); }; diff --git a/clang-tools-extra/clang-tidy/utils/ExceptionAnalyzer.cpp b/clang-tools-extra/clang-tidy/utils/ExceptionAnalyzer.cpp index bdde7249d2796..fd4320eb8144b 100644 --- a/clang-tools-extra/clang-tidy/utils/ExceptionAnalyzer.cpp +++ b/clang-tools-extra/clang-tidy/utils/ExceptionAnalyzer.cpp @@ -595,6 +595,11 @@ ExceptionAnalyzer::throwsException(const Stmt *St, Results.merge(DestructorExcs); } } + } else if (const auto *Lambda = dyn_cast(St)) { + for (const Stmt *Init : Lambda->capture_inits()) { + ExceptionInfo Excs = throwsException(Init, Caught, CallStack); + Results.merge(Excs); + } } else { for (const Stmt *Child : St->children()) { ExceptionInfo Excs = throwsException(Child, Caught, CallStack); diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst index c7ba1195f84f6..c3a6d2f9b2890 100644 --- a/clang-tools-extra/docs/ReleaseNotes.rst +++ b/clang-tools-extra/docs/ReleaseNotes.rst @@ -244,6 +244,11 @@ Changes in existing checks correcting a spelling mistake on its option ``NamePrefixSuffixSilenceDissimilarityTreshold``. +- Improved :doc:`bugprone-exception-escape + ` check's handling of lambdas: + exceptions from captures are now diagnosed, exceptions in the bodies of + lambdas that aren't actually invoked are not. + - Improved :doc:`bugprone-infinite-loop ` check by adding detection for variables introduced by structured bindings. @@ -272,7 +277,8 @@ Changes in existing checks - Improved :doc:`bugprone-throw-keyword-missing ` check by only considering - the canonical types of base classes as written. + the canonical types of base classes as written and adding a note on the base + class that triggered the warning. 
- Improved :doc:`bugprone-unchecked-optional-access ` check by supporting @@ -323,6 +329,11 @@ Changes in existing checks ` check to suggest using designated initializers for aliased aggregate types. +- Improved :doc:`modernize-use-nullptr + ` check by fixing a crash + on Windows when the check was enabled with a 32-bit :program:`clang-tidy` + binary. + - Improved :doc:`modernize-use-std-format ` check to correctly match when the format string is converted to a different type by an implicit diff --git a/clang-tools-extra/docs/clang-tidy/Contributing.rst b/clang-tools-extra/docs/clang-tidy/Contributing.rst index ad7f22381a3ca..4ede4ea36c13d 100644 --- a/clang-tools-extra/docs/clang-tidy/Contributing.rst +++ b/clang-tools-extra/docs/clang-tidy/Contributing.rst @@ -436,7 +436,7 @@ in the release notes, as the first sentence in the doxygen comments in the heade for your check class and as the first sentence of the check documentation. Avoid the phrase "this check" in your check summary and check documentation. -If your check relates to a published coding guideline (C++ Core Guidelines, MISRA, etc.) +If your check relates to a published coding guideline (C++ Core Guidelines, SEI CERT, etc.) or style guide, provide links to the relevant guideline or style guide sections in your check documentation. 
diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/exception-escape.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/exception-escape.cpp index b10bd1d482867..a52bbe2246d1e 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/exception-escape.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/exception-escape.cpp @@ -894,3 +894,65 @@ void pointer_exception_can_not_escape_with_void_handler() noexcept { } catch (void *) { } } + +void throw_in_uninvoked_lambda() noexcept { + [] { throw 42; }; +} + +void throw_in_lambda() noexcept { + // CHECK-MESSAGES: :[[@LINE-1]]:6: warning: an exception may be thrown in function 'throw_in_lambda' which should not throw exceptions + [] { throw 42; }(); + // CHECK-MESSAGES: :[[@LINE-1]]:8: note: frame #0: unhandled exception of type 'int' may be thrown in function 'operator()' here + // CHECK-MESSAGES: :[[@LINE-2]]:19: note: frame #1: function 'throw_in_lambda' calls function 'operator()' here +} + +struct copy_constructor_throws { + copy_constructor_throws(const copy_constructor_throws&) { throw 42; } +}; + +void throw_in_lambda_default_by_value_capture(const copy_constructor_throws& a) noexcept { + // CHECK-MESSAGES: :[[@LINE-1]]:6: warning: an exception may be thrown in function 'throw_in_lambda_default_by_value_capture' which should not throw exceptions + [=] { a; }; + // CHECK-MESSAGES: :[[@LINE-6]]:61: note: frame #0: unhandled exception of type 'int' may be thrown in function 'copy_constructor_throws' here + // CHECK-MESSAGES: :[[@LINE-2]]:4: note: frame #1: function 'throw_in_lambda_default_by_value_capture' calls function 'copy_constructor_throws' here +} + +void throw_in_lambda_explicit_by_value_capture(const copy_constructor_throws& a) noexcept { + // CHECK-MESSAGES: :[[@LINE-1]]:6: warning: an exception may be thrown in function 'throw_in_lambda_explicit_by_value_capture' which should not throw exceptions + [a] {}; + // CHECK-MESSAGES: :[[@LINE-13]]:61: note: frame #0: 
unhandled exception of type 'int' may be thrown in function 'copy_constructor_throws' here + // CHECK-MESSAGES: :[[@LINE-2]]:4: note: frame #1: function 'throw_in_lambda_explicit_by_value_capture' calls function 'copy_constructor_throws' here +} + +void no_throw_in_lambda_by_reference_capture(const copy_constructor_throws& a) noexcept { + [&] { a; }; + [&a] {}; +} + +void throw_in_lambda_init_capture() noexcept { + // CHECK-MESSAGES: :[[@LINE-1]]:6: warning: an exception may be thrown in function 'throw_in_lambda_init_capture' which should not throw exceptions + [a = [] { throw 42; return 0; }()] {}; + // CHECK-MESSAGES: :[[@LINE-1]]:13: note: frame #0: unhandled exception of type 'int' may be thrown in function 'operator()' here + // CHECK-MESSAGES: :[[@LINE-2]]:34: note: frame #1: function 'throw_in_lambda_init_capture' calls function 'operator()' here +} + +void throw_from_nested_lambda() noexcept { + // CHECK-MESSAGES: :[[@LINE-1]]:6: warning: an exception may be thrown in function 'throw_from_nested_lambda' which should not throw exceptions + [] { [] { throw 42; }(); }(); + // CHECK-MESSAGES: :[[@LINE-1]]:13: note: frame #0: unhandled exception of type 'int' may be thrown in function 'operator()' here + // CHECK-MESSAGES: :[[@LINE-2]]:24: note: frame #1: function 'operator()' calls function 'operator()' here + // CHECK-MESSAGES: :[[@LINE-3]]:29: note: frame #2: function 'throw_from_nested_lambda' calls function 'operator()' here +} + +const auto throw_in_noexcept_lambda = [] () noexcept { throw 42; }; +// CHECK-MESSAGES: :[[@LINE-1]]:39: warning: an exception may be thrown in function 'operator()' which should not throw exceptions +// CHECK-MESSAGES: :[[@LINE-2]]:56: note: frame #0: unhandled exception of type 'int' may be thrown in function 'operator()' here + +void thrower() { + throw 42; +} + +const auto indirect_throw_in_noexcept_lambda = [] () noexcept { thrower(); }; +// CHECK-MESSAGES: :[[@LINE-1]]:48: warning: an exception may be thrown in function 
'operator()' which should not throw exceptions +// CHECK-MESSAGES: :[[@LINE-5]]:3: note: frame #0: unhandled exception of type 'int' may be thrown in function 'thrower' here +// CHECK-MESSAGES: :[[@LINE-3]]:65: note: frame #1: function 'operator()' calls function 'thrower' here diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/throw-keyword-missing.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/throw-keyword-missing.cpp index 6ddaf246a354e..0ae51780ccc00 100644 --- a/clang-tools-extra/test/clang-tidy/checkers/bugprone/throw-keyword-missing.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/throw-keyword-missing.cpp @@ -20,6 +20,7 @@ typedef basic_string string; typedef basic_string wstring; // std::exception and std::runtime_error declaration. +// CHECK-MESSAGES-DAG: [[#EXCEPTION_LINE:@LINE + 1]]:8 struct exception { exception(); exception(const exception &other); @@ -50,12 +51,13 @@ struct RegularException { void stdExceptionNotTrownTest(int i) { if (i < 0) - // CHECK-MESSAGES: :[[@LINE+1]]:5: warning: suspicious exception object created but not thrown; did you mean 'throw {{.*}}'? [bugprone-throw-keyword-missing] + // CHECK-MESSAGES-DAG: :[[@LINE+1]]:5: warning: suspicious exception object created but not thrown; did you mean 'throw {{.*}}'? 
[bugprone-throw-keyword-missing] std::exception(); if (i > 0) - // CHECK-MESSAGES: :[[@LINE+1]]:5: warning: suspicious exception + // CHECK-MESSAGES-DAG: :[[@LINE+1]]:5: warning: suspicious exception std::runtime_error("Unexpected argument"); + // CHECK-MESSAGES: note: object type inherits from base class declared here } void stdExceptionThrownTest(int i) { @@ -181,6 +183,7 @@ class RegularError : public ERROR_BASE {}; void typedefTest() { // CHECK-MESSAGES: :[[@LINE+1]]:3: warning: suspicious exception RegularError(); + // CHECK-MESSAGES: :[[#EXCEPTION_LINE]]:8: note: object type inherits from base class declared here } struct ExceptionRAII { diff --git a/clang/CMakeLists.txt b/clang/CMakeLists.txt index 1bb73599970c1..4eaa712899856 100644 --- a/clang/CMakeLists.txt +++ b/clang/CMakeLists.txt @@ -80,6 +80,12 @@ if(CLANG_BUILT_STANDALONE) include(GetErrcMessages) include(LLVMDistributionSupport) + if(CMAKE_CROSSCOMPILING) + set(LLVM_USE_HOST_TOOLS ON) + include(CrossCompile) + llvm_create_cross_target(Clang NATIVE "" Release) + endif() + set(PACKAGE_VERSION "${LLVM_PACKAGE_VERSION}") set(BUG_REPORT_URL "${LLVM_PACKAGE_BUGREPORT}" CACHE STRING "Default URL where bug reports are to be submitted.") diff --git a/clang/bindings/python/clang/cindex.py b/clang/bindings/python/clang/cindex.py index c44e646a30f17..80140d2787608 100644 --- a/clang/bindings/python/clang/cindex.py +++ b/clang/bindings/python/clang/cindex.py @@ -1446,6 +1446,9 @@ def is_unexposed(self): # OpenMP stripe directive. OMP_STRIPE_DIRECTIVE = 310 + # OpenMP fuse directive. + OMP_FUSE_DIRECTIVE = 311 + # OpenACC Compute Construct. OPEN_ACC_COMPUTE_DIRECTIVE = 320 diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst index 6108e54a17390..68ca7bedddb06 100644 --- a/clang/docs/OpenMPSupport.rst +++ b/clang/docs/OpenMPSupport.rst @@ -482,6 +482,8 @@ implementation. 
+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | loop transformation apply clause | :none:`unclaimed` | :none:`unclaimed` | | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ +| loop fuse transformation | :good:`done` | :none:`unclaimed` | | ++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | workdistribute construct | | :none:`in progress` | @skc7, @mjklemm | +-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+ | task_iteration | :none:`unclaimed` | :none:`unclaimed` | | diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index 9744c4f17610b..270b5d336eba7 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -299,7 +299,8 @@ Improvements to Clang's diagnostics "format specifies type 'unsigned int' but the argument has type 'int', which differs in signedness [-Wformat-signedness]" "signedness of format specifier 'u' is incompatible with 'c' [-Wformat-signedness]" and the API-visible diagnostic id will be appropriate. - +- Clang now produces better diagnostics for template template parameter matching + involving 'auto' template parameters. - Fixed false positives in ``-Waddress-of-packed-member`` diagnostics when potential misaligned members get processed before they can get discarded. (#GH144729) @@ -360,6 +361,7 @@ Bug Fixes in This Version first parameter. (#GH113323). 
- Fixed a crash with incompatible pointer to integer conversions in designated initializers involving string literals. (#GH154046) +- Fix crash on CTAD for alias template. (#GH131342) - Clang now emits a frontend error when a function marked with the `flatten` attribute calls another function that requires target features not enabled in the caller. This prevents a fatal error in the backend. @@ -566,6 +568,7 @@ Crash and bug fixes - Fixed a crash in the static analyzer that when the expression in an ``[[assume(expr)]]`` attribute was enclosed in parentheses. (#GH151529) - Fixed a crash when parsing ``#embed`` parameters with unmatched closing brackets. (#GH152829) +- Fixed a crash when compiling ``__real__`` or ``__imag__`` unary operator on scalar value with type promotion. (#GH160583) Improvements ^^^^^^^^^^^^ @@ -596,6 +599,7 @@ OpenMP Support - Added support for ``defaultmap`` directive implicit-behavior ``storage``. - Added support for ``defaultmap`` directive implicit-behavior ``private``. - Added parsing and semantic analysis support for ``groupprivate`` directive. +- Added support for 'omp fuse' directive. Improvements ^^^^^^^^^^^^ diff --git a/clang/include/clang-c/Index.h b/clang/include/clang-c/Index.h index be038d9165fc6..f13d9c9307b40 100644 --- a/clang/include/clang-c/Index.h +++ b/clang/include/clang-c/Index.h @@ -2162,6 +2162,10 @@ enum CXCursorKind { */ CXCursor_OMPStripeDirective = 310, + /** OpenMP fuse directive + */ + CXCursor_OMPFuseDirective = 311, + /** OpenACC Compute Construct. 
*/ CXCursor_OpenACCComputeConstruct = 320, diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h index a2c55c71e09ae..12351e98e5a2b 100644 --- a/clang/include/clang/AST/ASTContext.h +++ b/clang/include/clang/AST/ASTContext.h @@ -25,10 +25,12 @@ #include "clang/AST/RawCommentList.h" #include "clang/AST/SYCLKernelInfo.h" #include "clang/AST/TemplateName.h" +#include "clang/AST/TypeOrdering.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseMapInfo.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" @@ -51,6 +53,36 @@ class FixedPointSemantics; struct fltSemantics; template class SmallPtrSet; +struct ScalableVecTyKey { + clang::QualType EltTy; + unsigned NumElts; + unsigned NumFields; + + bool operator==(const ScalableVecTyKey &RHS) const { + return EltTy == RHS.EltTy && NumElts == RHS.NumElts && + NumFields == RHS.NumFields; + } +}; + +// Provide a DenseMapInfo specialization so that ScalableVecTyKey can be used +// as a key in DenseMap. +template <> struct DenseMapInfo { + static inline ScalableVecTyKey getEmptyKey() { + return {DenseMapInfo::getEmptyKey(), ~0U, ~0U}; + } + static inline ScalableVecTyKey getTombstoneKey() { + return {DenseMapInfo::getTombstoneKey(), ~0U, ~0U}; + } + static unsigned getHashValue(const ScalableVecTyKey &Val) { + return hash_combine(DenseMapInfo::getHashValue(Val.EltTy), + Val.NumElts, Val.NumFields); + } + static bool isEqual(const ScalableVecTyKey &LHS, + const ScalableVecTyKey &RHS) { + return LHS == RHS; + } +}; + } // namespace llvm namespace clang { @@ -505,6 +537,9 @@ class ASTContext : public RefCountedBase { SmallVector> ObjCSubClasses; + // A mapping from Scalable Vector Type keys to their corresponding QualType. 
+ mutable llvm::DenseMap ScalableVecTyMap; + ASTContext &this_() { return *this; } public: diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h index 9fedb230ce397..5f16bac94d5e6 100644 --- a/clang/include/clang/AST/ExprCXX.h +++ b/clang/include/clang/AST/ExprCXX.h @@ -4714,7 +4714,7 @@ class SubstNonTypeTemplateParmExpr : public Expr { // sugared: it doesn't need to be resugared later. bool getFinal() const { return Final; } - NamedDecl *getParameter() const; + NonTypeTemplateParmDecl *getParameter() const; bool isReferenceParameter() const { return AssociatedDeclAndRef.getInt(); } diff --git a/clang/include/clang/AST/OpenMPClause.h b/clang/include/clang/AST/OpenMPClause.h index 42b426815920d..68d220a77b18c 100644 --- a/clang/include/clang/AST/OpenMPClause.h +++ b/clang/include/clang/AST/OpenMPClause.h @@ -1149,6 +1149,80 @@ class OMPFullClause final : public OMPNoChildClause { static OMPFullClause *CreateEmpty(const ASTContext &C); }; +/// This class represents the 'looprange' clause in the +/// '#pragma omp fuse' directive +/// +/// \code {c} +/// #pragma omp fuse looprange(1,2) +/// { +/// for(int i = 0; i < 64; ++i) +/// for(int j = 0; j < 256; j+=2) +/// for(int k = 127; k >= 0; --k) +/// \endcode +class OMPLoopRangeClause final : public OMPClause { + friend class OMPClauseReader; + /// Location of '(' + SourceLocation LParenLoc; + + /// Location of first and count expressions + SourceLocation FirstLoc, CountLoc; + + /// Number of looprange arguments (always 2: first, count) + enum { FirstExpr, CountExpr, NumArgs }; + Stmt *Args[NumArgs] = {nullptr, nullptr}; + + /// Set looprange 'first' expression + void setFirst(Expr *E) { Args[FirstExpr] = E; } + + /// Set looprange 'count' expression + void setCount(Expr *E) { Args[CountExpr] = E; } + + /// Build an empty clause for deserialization. + explicit OMPLoopRangeClause() + : OMPClause(llvm::omp::OMPC_looprange, {}, {}) {} + +public: + /// Build a 'looprange' clause AST node. 
+ static OMPLoopRangeClause * + Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, + SourceLocation FirstLoc, SourceLocation CountLoc, + SourceLocation EndLoc, Expr *First, Expr *Count); + + /// Build an empty 'looprange' clause node. + static OMPLoopRangeClause *CreateEmpty(const ASTContext &C); + + // Location getters/setters + SourceLocation getLParenLoc() const { return LParenLoc; } + SourceLocation getFirstLoc() const { return FirstLoc; } + SourceLocation getCountLoc() const { return CountLoc; } + + void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } + void setFirstLoc(SourceLocation Loc) { FirstLoc = Loc; } + void setCountLoc(SourceLocation Loc) { CountLoc = Loc; } + + /// Get looprange 'first' expression + Expr *getFirst() const { return cast_or_null(Args[FirstExpr]); } + + /// Get looprange 'count' expression + Expr *getCount() const { return cast_or_null(Args[CountExpr]); } + + child_range children() { return child_range(Args, Args + NumArgs); } + const_child_range children() const { + return const_child_range(Args, Args + NumArgs); + } + + child_range used_children() { + return child_range(child_iterator(), child_iterator()); + } + const_child_range used_children() const { + return const_child_range(const_child_iterator(), const_child_iterator()); + } + + static bool classof(const OMPClause *T) { + return T->getClauseKind() == llvm::omp::OMPC_looprange; + } +}; + /// Representation of the 'partial' clause of the '#pragma omp unroll' /// directive. 
/// diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h index af1a073cc4a5a..7a2881f6124f3 100644 --- a/clang/include/clang/AST/RecursiveASTVisitor.h +++ b/clang/include/clang/AST/RecursiveASTVisitor.h @@ -3177,6 +3177,9 @@ DEF_TRAVERSE_STMT(OMPUnrollDirective, DEF_TRAVERSE_STMT(OMPReverseDirective, { TRY_TO(TraverseOMPExecutableDirective(S)); }) +DEF_TRAVERSE_STMT(OMPFuseDirective, + { TRY_TO(TraverseOMPExecutableDirective(S)); }) + DEF_TRAVERSE_STMT(OMPInterchangeDirective, { TRY_TO(TraverseOMPExecutableDirective(S)); }) @@ -3494,6 +3497,14 @@ bool RecursiveASTVisitor::VisitOMPFullClause(OMPFullClause *C) { return true; } +template +bool RecursiveASTVisitor::VisitOMPLoopRangeClause( + OMPLoopRangeClause *C) { + TRY_TO(TraverseStmt(C->getFirst())); + TRY_TO(TraverseStmt(C->getCount())); + return true; +} + template bool RecursiveASTVisitor::VisitOMPPartialClause(OMPPartialClause *C) { TRY_TO(TraverseStmt(C->getFactor())); diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h index d9f87f1e49b40..bc6aeaa8d143c 100644 --- a/clang/include/clang/AST/StmtOpenMP.h +++ b/clang/include/clang/AST/StmtOpenMP.h @@ -21,6 +21,7 @@ #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" +#include "llvm/Support/Casting.h" namespace clang { @@ -677,6 +678,10 @@ class OMPParallelDirective : public OMPExecutableDirective { } }; +// Forward declaration of a generic loop transformation. Used in the declaration +// of OMPLoopBasedDirective. +class OMPLoopTransformationDirective; + /// The base class for all loop-based directives, including loop transformation /// directives. class OMPLoopBasedDirective : public OMPExecutableDirective { @@ -889,24 +894,23 @@ class OMPLoopBasedDirective : public OMPExecutableDirective { /// Calls the specified callback function for all the loops in \p CurStmt, /// from the outermost to the innermost. 
- static bool doForAllLoops( - Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, - llvm::function_ref Callback, - llvm::function_ref - OnTransformationCallback); + static bool + doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops, + unsigned NumLoops, + llvm::function_ref Callback, + llvm::function_ref + OnTransformationCallback); static bool doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref Callback, - llvm::function_ref< - void(const OMPCanonicalLoopNestTransformationDirective *)> + llvm::function_ref OnTransformationCallback) { auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) { return Callback(Cnt, CurStmt); }; auto &&NewTransformCb = - [OnTransformationCallback]( - OMPCanonicalLoopNestTransformationDirective *A) { + [OnTransformationCallback](OMPLoopTransformationDirective *A) { OnTransformationCallback(A); }; return doForAllLoops(const_cast(CurStmt), TryImperfectlyNestedLoops, @@ -919,7 +923,7 @@ class OMPLoopBasedDirective : public OMPExecutableDirective { doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref Callback) { - auto &&TransformCb = [](OMPCanonicalLoopNestTransformationDirective *) {}; + auto &&TransformCb = [](OMPLoopTransformationDirective *) {}; return doForAllLoops(CurStmt, TryImperfectlyNestedLoops, NumLoops, Callback, TransformCb); } @@ -957,9 +961,11 @@ class OMPLoopBasedDirective : public OMPExecutableDirective { }; /// Common class of data shared between -/// OMPCanonicalLoopNestTransformationDirective and transformations over -/// canonical loop sequences. +/// OMPCanonicalLoopNestTransformationDirective and +/// OMPCanonicalLoopSequenceTransformationDirective class OMPLoopTransformationDirective { + friend class ASTStmtReader; + /// Number of (top-level) generated loops. /// This value is 1 for most transformations as they only map one loop nest /// into another. 
@@ -969,15 +975,39 @@ class OMPLoopTransformationDirective { /// generate more than one loop nest, so the value would be >= 1. unsigned NumGeneratedTopLevelLoops = 1; + /// We need this because we cannot easily make OMPLoopTransformationDirective + /// a proper Stmt. + Stmt *S = nullptr; + protected: void setNumGeneratedTopLevelLoops(unsigned N) { NumGeneratedTopLevelLoops = N; } + explicit OMPLoopTransformationDirective(Stmt *S) : S(S) {} + public: unsigned getNumGeneratedTopLevelLoops() const { return NumGeneratedTopLevelLoops; } + + /// Returns the specific directive related to this loop transformation. + Stmt *getDirective() const { return S; } + + /// Get the de-sugared statements after the loop transformation. + /// + /// Might be nullptr if either the directive generates no loops and is handled + /// directly in CodeGen, or resolving a template-dependence context is + /// required. + Stmt *getTransformedStmt() const; + + /// Return preinits statement. + Stmt *getPreInits() const; + + static bool classof(const Stmt *T) { + return isa(T); + } }; /// The base class for all transformation directives of canonical loop nests. @@ -990,7 +1020,8 @@ class OMPCanonicalLoopNestTransformationDirective explicit OMPCanonicalLoopNestTransformationDirective( StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumAssociatedLoops) - : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, NumAssociatedLoops) {} + : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, NumAssociatedLoops), + OMPLoopTransformationDirective(this) {} public: /// Return the number of associated (consumed) loops. 
@@ -5928,6 +5959,112 @@ class OMPInterchangeDirective final } }; +/// The base class for all transformation directives of canonical loop +/// sequences (currently only 'fuse') +class OMPCanonicalLoopSequenceTransformationDirective + : public OMPExecutableDirective, + public OMPLoopTransformationDirective { + friend class ASTStmtReader; + +protected: + explicit OMPCanonicalLoopSequenceTransformationDirective( + StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, + SourceLocation EndLoc) + : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc), + OMPLoopTransformationDirective(this) {} + +public: + /// Get the de-sugared statements after the loop transformation. + /// + /// Might be nullptr if either the directive generates no loops and is handled + /// directly in CodeGen, or resolving a template-dependence context is + /// required. + Stmt *getTransformedStmt() const; + + /// Return preinits statement. + Stmt *getPreInits() const; + + static bool classof(const Stmt *T) { + Stmt::StmtClass C = T->getStmtClass(); + return C == OMPFuseDirectiveClass; + } +}; + +/// Represents the '#pragma omp fuse' loop transformation directive +/// +/// \code{c} +/// #pragma omp fuse +/// { +/// for(int i = 0; i < m1; ++i) {...} +/// for(int j = 0; j < m2; ++j) {...} +/// ... +/// } +/// \endcode +class OMPFuseDirective final + : public OMPCanonicalLoopSequenceTransformationDirective { + friend class ASTStmtReader; + friend class OMPExecutableDirective; + + // Offsets of child members. 
+ enum { + PreInitsOffset = 0, + TransformedStmtOffset, + }; + + explicit OMPFuseDirective(SourceLocation StartLoc, SourceLocation EndLoc) + : OMPCanonicalLoopSequenceTransformationDirective( + OMPFuseDirectiveClass, llvm::omp::OMPD_fuse, StartLoc, EndLoc) {} + + void setPreInits(Stmt *PreInits) { + Data->getChildren()[PreInitsOffset] = PreInits; + } + + void setTransformedStmt(Stmt *S) { + Data->getChildren()[TransformedStmtOffset] = S; + } + +public: + /// Create a new AST node representation for #pragma omp fuse' + /// + /// \param C Context of the AST + /// \param StartLoc Location of the introducer (e.g the 'omp' token) + /// \param EndLoc Location of the directive's end (e.g the tok::eod) + /// \param Clauses The directive's clauses + /// \param NumLoops Total number of loops in the canonical loop sequence. + /// \param NumGeneratedTopLevelLoops Number of top-level generated loops. + // Typically 1 but looprange clause can + // change this. + /// \param AssociatedStmt The outermost associated loop + /// \param TransformedStmt The loop nest after fusion, or nullptr in + /// dependent + /// \param PreInits Helper preinits statements for the loop nest + static OMPFuseDirective * + Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + ArrayRef Clauses, unsigned NumGeneratedTopLevelLoops, + Stmt *AssociatedStmt, Stmt *TransformedStmt, Stmt *PreInits); + + /// Build an empty '#pragma omp fuse' AST node for deserialization + /// + /// \param C Context of the AST + /// \param NumClauses Number of clauses to allocate + /// \param NumLoops Number of top level loops to allocate + static OMPFuseDirective *CreateEmpty(const ASTContext &C, + unsigned NumClauses); + + /// Gets the associated loops after the transformation. This is the de-sugared + /// replacement or nulltpr in dependent contexts. + Stmt *getTransformedStmt() const { + return Data->getChildren()[TransformedStmtOffset]; + } + + /// Return preinits statement. 
+ Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; } + + static bool classof(const Stmt *T) { + return T->getStmtClass() == OMPFuseDirectiveClass; + } +}; + /// This represents '#pragma omp scan' directive. /// /// \code @@ -6596,4 +6733,37 @@ class OMPAssumeDirective final : public OMPExecutableDirective { } // end namespace clang +namespace llvm { +// Allow a Stmt* be casted correctly to an OMPLoopTransformationDirective*. +// The default routines would just use a C-style cast which won't work well +// for the multiple inheritance here. We have to use a static cast from the +// corresponding subclass. +template <> +struct CastInfo + : public NullableValueCastFailed, + public DefaultDoCastIfPossible< + clang::OMPLoopTransformationDirective *, clang::Stmt *, + CastInfo> { + static bool isPossible(const clang::Stmt *T) { + return clang::OMPLoopTransformationDirective::classof(T); + } + + static clang::OMPLoopTransformationDirective *doCast(clang::Stmt *T) { + if (auto *D = + dyn_cast(T)) + return static_cast(D); + if (auto *D = + dyn_cast(T)) + return static_cast(D); + llvm_unreachable("unexpected type"); + } +}; +template <> +struct CastInfo + : public ConstStrippingForwardingCast< + clang::OMPLoopTransformationDirective, const clang::Stmt *, + CastInfo> {}; + +} // namespace llvm + #endif diff --git a/clang/include/clang/AST/TypeBase.h b/clang/include/clang/AST/TypeBase.h index b02d9c7499fe5..e0d00b82f2b76 100644 --- a/clang/include/clang/AST/TypeBase.h +++ b/clang/include/clang/AST/TypeBase.h @@ -3495,7 +3495,9 @@ class AdjustedType : public Type, public llvm::FoldingSetNode { AdjustedType(TypeClass TC, QualType OriginalTy, QualType AdjustedTy, QualType CanonicalPtr) - : Type(TC, CanonicalPtr, OriginalTy->getDependence()), + : Type(TC, CanonicalPtr, + AdjustedTy->getDependence() | + (OriginalTy->getDependence() & ~TypeDependence::Dependent)), OriginalTy(OriginalTy), AdjustedTy(AdjustedTy) {} public: diff --git 
a/clang/include/clang/Basic/DiagnosticCommonKinds.td b/clang/include/clang/Basic/DiagnosticCommonKinds.td index 0bd8a423c393e..6e50e225a8cc1 100644 --- a/clang/include/clang/Basic/DiagnosticCommonKinds.td +++ b/clang/include/clang/Basic/DiagnosticCommonKinds.td @@ -433,6 +433,12 @@ def err_omp_more_one_clause : Error< "directive '#pragma omp %0' cannot contain more than one '%1' clause%select{| with '%3' name modifier| with 'source' dependence}2">; def err_omp_required_clause : Error< "directive '#pragma omp %0' requires the '%1' clause">; +def warn_omp_gpu_unsupported_clause: Warning< + "clause '%0' is currently not supported on a GPU; clause ignored">, + InGroup; +def warn_omp_gpu_unsupported_modifier_for_clause: Warning< + "modifier '%0' is currently not supported on a GPU for the '%1' clause; modifier ignored">, + InGroup; // Static Analyzer Core def err_unknown_analyzer_checker_or_package : Error< diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td index dc4c6d3de27c1..b157cbb0b8069 100644 --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -5770,8 +5770,10 @@ def err_template_recursion_depth_exceeded : Error< def err_constraint_depends_on_self : Error<"satisfaction of constraint %0 depends on itself">, NoSFINAE; -def note_template_recursion_depth : Note< - "use -ftemplate-depth=N to increase recursive template instantiation depth">; +def note_template_recursion_depth + : Note<"use -ftemplate-depth=N to increase recursive template " + "instantiation depth">, + NoSFINAE; def err_template_instantiate_within_definition : Error< "%select{implicit|explicit}0 instantiation of template %1 within its" @@ -11761,6 +11763,18 @@ def note_omp_implicit_dsa : Note< "implicitly determined as %0">; def err_omp_loop_var_dsa : Error< "loop iteration variable in the associated loop of 'omp %1' directive may not be %0, predetermined as %2">; +def 
err_omp_not_a_loop_sequence + : Error<"statement after '#pragma omp %0' must be a loop sequence " + "containing canonical loops or loop-generating constructs">; +def err_omp_empty_loop_sequence + : Error<"loop sequence after '#pragma omp %0' must contain at least 1 " + "canonical loop or loop-generating construct">; +def err_omp_invalid_looprange + : Error<"looprange clause selects loops from %1 to %2 but this exceeds the " + "number of loops (%3) in the loop sequence">; +def warn_omp_redundant_fusion : Warning<"looprange clause selects a single " + "loop, resulting in redundant fusion">, + InGroup; def err_omp_not_for : Error< "%select{statement after '#pragma omp %1' must be a for loop|" "expected %2 for loops after '#pragma omp %1'%select{|, but found only %4}3}0">; diff --git a/clang/include/clang/Basic/OpenMPKinds.h b/clang/include/clang/Basic/OpenMPKinds.h index 4c988e0dacb57..ed89a31e2684b 100644 --- a/clang/include/clang/Basic/OpenMPKinds.h +++ b/clang/include/clang/Basic/OpenMPKinds.h @@ -391,6 +391,13 @@ bool isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind); bool isOpenMPCanonicalLoopNestTransformationDirective( OpenMPDirectiveKind DKind); +/// Checks if the specified directive is a loop transformation directive that +/// applies to a canonical loop sequence. +/// \param DKind Specified directive. +/// \return True iff the directive is a loop transformation. +bool isOpenMPCanonicalLoopSequenceTransformationDirective( + OpenMPDirectiveKind DKind); + /// Checks if the specified directive is a loop transformation directive. /// \param DKind Specified directive. /// \return True iff the directive is a loop transformation. 
diff --git a/clang/include/clang/Basic/StmtNodes.td b/clang/include/clang/Basic/StmtNodes.td index dd1a24405fae7..bf3686bb372d5 100644 --- a/clang/include/clang/Basic/StmtNodes.td +++ b/clang/include/clang/Basic/StmtNodes.td @@ -238,6 +238,10 @@ def OMPUnrollDirective : StmtNode; def OMPReverseDirective : StmtNode; def OMPInterchangeDirective : StmtNode; +def OMPCanonicalLoopSequenceTransformationDirective + : StmtNode; +def OMPFuseDirective + : StmtNode; def OMPForDirective : StmtNode; def OMPForSimdDirective : StmtNode; def OMPSectionsDirective : StmtNode; diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h index e5c5ada3b0858..ceb16174e13e7 100644 --- a/clang/include/clang/Basic/TargetInfo.h +++ b/clang/include/clang/Basic/TargetInfo.h @@ -1259,6 +1259,10 @@ class TargetInfo : public TransferrableTargetInfo, ArrayRef OutputConstraints, unsigned &Index) const; + std::string + simplifyConstraint(StringRef Constraint, + SmallVectorImpl *OutCons = nullptr) const; + // Constraint parm will be left pointing at the last character of // the constraint. In practice, it won't be changed unless the // constraint is longer than one character. 
diff --git a/clang/include/clang/CodeGen/BackendUtil.h b/clang/include/clang/CodeGen/BackendUtil.h index 92e0d13bf25b6..8b0d975a876e6 100644 --- a/clang/include/clang/CodeGen/BackendUtil.h +++ b/clang/include/clang/CodeGen/BackendUtil.h @@ -49,7 +49,7 @@ void EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts, llvm::MemoryBufferRef Buf); void EmbedObject(llvm::Module *M, const CodeGenOptions &CGOpts, - DiagnosticsEngine &Diags); + llvm::vfs::FileSystem &VFS, DiagnosticsEngine &Diags); } // namespace clang #endif diff --git a/clang/include/clang/Driver/CommonArgs.h b/clang/include/clang/Driver/CommonArgs.h index 1464ce4e1b31b..40ae40665b040 100644 --- a/clang/include/clang/Driver/CommonArgs.h +++ b/clang/include/clang/Driver/CommonArgs.h @@ -105,6 +105,16 @@ unsigned DwarfVersionNum(StringRef ArgValue); const llvm::opt::Arg *getDwarfNArg(const llvm::opt::ArgList &Args); unsigned getDwarfVersion(const ToolChain &TC, const llvm::opt::ArgList &Args); +enum class DwarfFissionKind { None, Split, Single }; + +DwarfFissionKind getDebugFissionKind(const Driver &D, + const llvm::opt::ArgList &Args, + llvm::opt::Arg *&Arg); + +bool checkDebugInfoOption(const llvm::opt::Arg *A, + const llvm::opt::ArgList &Args, const Driver &D, + const ToolChain &TC); + void AddAssemblerKPIC(const ToolChain &ToolChain, const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs); diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 77f19a240a7f9..6245cf33a0719 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -3837,10 +3837,26 @@ let Visibility = [ClangOption, CC1Option, FC1Option, FlangOption] in { let Group = f_Group in { def fopenmp_target_debug_EQ : Joined<["-"], "fopenmp-target-debug=">; -def fopenmp_assume_teams_oversubscription : Flag<["-"], "fopenmp-assume-teams-oversubscription">; -def fopenmp_assume_threads_oversubscription : Flag<["-"], 
"fopenmp-assume-threads-oversubscription">; -def fno_openmp_assume_teams_oversubscription : Flag<["-"], "fno-openmp-assume-teams-oversubscription">; -def fno_openmp_assume_threads_oversubscription : Flag<["-"], "fno-openmp-assume-threads-oversubscription">; +def fopenmp_assume_teams_oversubscription : Flag<["-"], "fopenmp-assume-teams-oversubscription">, + HelpText<"Allow the optimizer to discretely increase the number of " + "teams. May cause ignore environment variables that set " + "the number of teams to be ignored. The combination of " + "-fopenmp-assume-teams-oversubscription " + "and -fopenmp-assume-threads-oversubscription " + "may allow the conversion of loops into sequential code by " + "ensuring that each team/thread executes at most one iteration.">; +def fopenmp_assume_threads_oversubscription : Flag<["-"], "fopenmp-assume-threads-oversubscription">, + HelpText<"Allow the optimizer to discretely increase the number of " + "threads. May cause ignore environment variables that set " + "the number of threads to be ignored. 
The combination of " + "-fopenmp-assume-teams-oversubscription " + "and -fopenmp-assume-threads-oversubscription " + "may allow the conversion of loops into sequential code by " + "ensuring that each team/thread executes at most one iteration.">; +def fno_openmp_assume_teams_oversubscription : Flag<["-"], "fno-openmp-assume-teams-oversubscription">, + HelpText<"Do not assume teams oversubscription.">; +def fno_openmp_assume_threads_oversubscription : Flag<["-"], "fno-openmp-assume-threads-oversubscription">, + HelpText<"Do not assume threads oversubscription.">; def fopenmp_assume_no_thread_state : Flag<["-"], "fopenmp-assume-no-thread-state">, HelpText<"Assert no thread in a parallel region modifies an ICV">, MarshallingInfoFlag>; @@ -4754,13 +4770,13 @@ defm column_info : BoolOption<"g", "column-info", PosFlag, BothFlags<[], [ClangOption, CLOption, DXCOption]>>, Group; def gsplit_dwarf : Flag<["-"], "gsplit-dwarf">, Group, - Visibility<[ClangOption, CLOption, DXCOption]>; + Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>; def gsplit_dwarf_EQ : Joined<["-"], "gsplit-dwarf=">, Group, - Visibility<[ClangOption, CLOption, DXCOption]>, + Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>, HelpText<"Set DWARF fission mode">, Values<"split,single">; def gno_split_dwarf : Flag<["-"], "gno-split-dwarf">, Group, - Visibility<[ClangOption, CLOption, DXCOption]>; + Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>; def gtemplate_alias : Flag<["-"], "gtemplate-alias">, Group, Visibility<[ClangOption, CC1Option]>; def gno_template_alias : Flag<["-"], "gno-template-alias">, Group, Visibility<[ClangOption]>; def gsimple_template_names : Flag<["-"], "gsimple-template-names">, Group; @@ -8405,7 +8421,7 @@ def main_file_name : Separate<["-"], "main-file-name">, MarshallingInfoString>; def split_dwarf_output : Separate<["-"], "split-dwarf-output">, HelpText<"File name to use for split dwarf debug info output">, - Visibility<[CC1Option, CC1AsOption]>, + 
Visibility<[CC1Option, CC1AsOption, FC1Option]>, MarshallingInfoString>; let Visibility = [CC1Option, FC1Option] in { @@ -8437,6 +8453,10 @@ def dependent_lib : Joined<["--"], "dependent-lib=">, HelpText<"Add dependent library">, MarshallingInfoStringVector>; +def split_dwarf_file : Separate<["-"], "split-dwarf-file">, + HelpText<"Name of the split dwarf debug info file to encode in the object file">, + MarshallingInfoString>; + } // let Visibility = [CC1Option, FC1Option] let Visibility = [CC1Option] in { @@ -8447,9 +8467,6 @@ def fblocks_runtime_optional : Flag<["-"], "fblocks-runtime-optional">, def fexternc_nounwind : Flag<["-"], "fexternc-nounwind">, HelpText<"Assume all functions with C linkage do not unwind">, MarshallingInfoFlag>; -def split_dwarf_file : Separate<["-"], "split-dwarf-file">, - HelpText<"Name of the split dwarf debug info file to encode in the object file">, - MarshallingInfoString>; def fno_wchar : Flag<["-"], "fno-wchar">, HelpText<"Disable C++ builtin type wchar_t">, MarshallingInfoNegativeFlag, cplusplus.KeyPath>, diff --git a/clang/include/clang/Frontend/Utils.h b/clang/include/clang/Frontend/Utils.h index f86c2f5074de0..49fd920d1ec43 100644 --- a/clang/include/clang/Frontend/Utils.h +++ b/clang/include/clang/Frontend/Utils.h @@ -143,8 +143,9 @@ class ModuleDependencyCollector : public DependencyCollector { std::error_code copyToRoot(StringRef Src, StringRef Dst = {}); public: - ModuleDependencyCollector(std::string DestDir) - : DestDir(std::move(DestDir)) {} + ModuleDependencyCollector(std::string DestDir, + IntrusiveRefCntPtr VFS) + : DestDir(std::move(DestDir)), Canonicalizer(std::move(VFS)) {} ~ModuleDependencyCollector() override { writeFileMap(); } StringRef getDest() { return DestDir; } diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h index 30edd303e1824..e301cf1080977 100644 --- a/clang/include/clang/Parse/Parser.h +++ b/clang/include/clang/Parse/Parser.h @@ -6767,6 +6767,9 @@ class Parser : 
public CodeCompletionHandler { OpenMPClauseKind Kind, bool ParseOnly); + /// Parses the 'looprange' clause of a '#pragma omp fuse' directive. + OMPClause *ParseOpenMPLoopRangeClause(); + /// Parses the 'sizes' clause of a '#pragma omp tile' directive. OMPClause *ParseOpenMPSizesClause(); diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h index 5edfc29d93781..f53aafdeb4f36 100644 --- a/clang/include/clang/Sema/Sema.h +++ b/clang/include/clang/Sema/Sema.h @@ -11714,6 +11714,23 @@ class Sema final : public SemaBase { const TemplateArgumentListInfo *TemplateArgs, bool IsAddressOfOperand); + UnsignedOrNone getPackIndex(TemplateArgument Pack) const { + return Pack.pack_size() - 1 - *ArgPackSubstIndex; + } + + TemplateArgument + getPackSubstitutedTemplateArgument(TemplateArgument Arg) const { + Arg = Arg.pack_elements()[*ArgPackSubstIndex]; + if (Arg.isPackExpansion()) + Arg = Arg.getPackExpansionPattern(); + return Arg; + } + + ExprResult BuildSubstNonTypeTemplateParmExpr( + Decl *AssociatedDecl, const NonTypeTemplateParmDecl *NTTP, + SourceLocation loc, TemplateArgument Replacement, + UnsignedOrNone PackIndex, bool Final); + /// Form a template name from a name that is syntactically required to name a /// template, either due to use of the 'template' keyword or because a name in /// this syntactic context is assumed to name a template (C++ @@ -13318,8 +13335,6 @@ class Sema final : public SemaBase { Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; - bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, - SourceRange InstantiationRange); InstantiatingTemplate(Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, @@ -13512,7 +13527,7 @@ class Sema final : public SemaBase { ~ArgPackSubstIndexRAII() { Self.ArgPackSubstIndex = OldSubstIndex; } }; - void pushCodeSynthesisContext(CodeSynthesisContext Ctx); + bool pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); void 
PrintContextStack(InstantiationContextDiagFuncRef DiagFunc) { diff --git a/clang/include/clang/Sema/SemaOpenMP.h b/clang/include/clang/Sema/SemaOpenMP.h index c0fd7a6d63611..daf58b18a03cb 100644 --- a/clang/include/clang/Sema/SemaOpenMP.h +++ b/clang/include/clang/Sema/SemaOpenMP.h @@ -463,6 +463,13 @@ class SemaOpenMP : public SemaBase { Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); + + /// Called on well-formed '#pragma omp fuse' after parsing of its + /// clauses and the associated statement. + StmtResult ActOnOpenMPFuseDirective(ArrayRef Clauses, + Stmt *AStmt, SourceLocation StartLoc, + SourceLocation EndLoc); + /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult @@ -921,6 +928,12 @@ class SemaOpenMP : public SemaBase { SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); + + /// Called on well-form 'looprange' clause after parsing its arguments. + OMPClause * + ActOnOpenMPLoopRangeClause(Expr *First, Expr *Count, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation FirstLoc, + SourceLocation CountLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, @@ -1485,7 +1498,81 @@ class SemaOpenMP : public SemaBase { bool checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl &LoopHelpers, - Stmt *&Body, SmallVectorImpl> &OriginalInits); + Stmt *&Body, SmallVectorImpl> &OriginalInits); + + /// Holds the result of the analysis of a (possibly canonical) loop. + struct LoopAnalysis { + /// The analyzed loop or loop transformation. + Stmt *AStmt = nullptr; + /// Loop analyses results. + OMPLoopBasedDirective::HelperExprs HelperExprs; + /// The for-statement of the loop. TheForStmt equals AStmt only when the + /// latter is a canonical loop (i.e. not a loop transformation). 
+ Stmt *TheForStmt = nullptr; + /// Initialization statements before transformations. + SmallVector OriginalInits; + /// Initialization statements required after transformation of this loop. + SmallVector TransformsPreInits; + + explicit LoopAnalysis(Stmt *S) : AStmt(S) {} + + bool isRegularLoop() const { return isRegularLoop(AStmt); } + bool isLoopTransformation() const { return isLoopTransformation(AStmt); } + + // Convenience functions used when building LoopSequenceAnalysis. + static bool isRegularLoop(Stmt *S) { + return isa(S); + } + static bool isLoopTransformation(Stmt *S) { + return isa(S); + } + }; + + /// Holds the result of the analysis of a (possibly canonical) loop sequence. + struct LoopSequenceAnalysis { + /// Number of top level canonical loops. + unsigned LoopSeqSize = 0; + /// For each loop results of the analysis. + SmallVector Loops; + /// Additional code required before entering the transformed loop sequence. + SmallVector LoopSequencePreInits; + + // Convenience function used when building the LoopSequenceAnalysis. + static bool isLoopSequenceDerivation(Stmt *S) { + return LoopAnalysis::isRegularLoop(S) || + LoopAnalysis::isLoopTransformation(S); + } + }; + + /// The main recursive process of `checkTransformableLoopSequence` that + /// performs grammatical parsing of a canonical loop sequence. It extracts + /// key information, such as the number of top-level loops, loop statements, + /// helper expressions, and other relevant loop-related data, all in a single + /// execution to avoid redundant traversals. This analysis flattens inner + /// Loop Sequences + /// + /// \param LoopSeqStmt The AST of the original statement. + /// \param SeqAnalysis [out] Result of the analysis of \p LoopSeqStmt + /// \param Context + /// \param Kind The loop transformation directive kind. + /// \return Whether the original statement is both syntactically and + /// semantically correct according to OpenMP 6.0 canonical loop + /// sequence definition. 
+ bool analyzeLoopSequence(Stmt *LoopSeqStmt, LoopSequenceAnalysis &SeqAnalysis, + ASTContext &Context, OpenMPDirectiveKind Kind); + + /// Validates and checks whether a loop sequence can be transformed according + /// to the given directive, providing necessary setup and initialization + /// (Driver function) before recursion using `analyzeLoopSequence`. + /// + /// \param Kind The loop transformation directive kind. + /// \param AStmt The AST of the original statement + /// \param SeqAnalysis [out] Result of the analysis of \p LoopSeqStmt + /// \param Context + /// \return Whether there was an absence of errors or not + bool checkTransformableLoopSequence(OpenMPDirectiveKind Kind, Stmt *AStmt, + LoopSequenceAnalysis &SeqAnalysis, + ASTContext &Context); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h index 441047d64f48c..99864c7373908 100644 --- a/clang/include/clang/Serialization/ASTBitCodes.h +++ b/clang/include/clang/Serialization/ASTBitCodes.h @@ -1951,6 +1951,7 @@ enum StmtCode { STMT_OMP_UNROLL_DIRECTIVE, STMT_OMP_REVERSE_DIRECTIVE, STMT_OMP_INTERCHANGE_DIRECTIVE, + STMT_OMP_FUSE_DIRECTIVE, STMT_OMP_FOR_DIRECTIVE, STMT_OMP_FOR_SIMD_DIRECTIVE, STMT_OMP_SECTIONS_DIRECTIVE, diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index 97c59b2ceec2f..61dd330553860 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -4567,6 +4567,10 @@ QualType ASTContext::getWebAssemblyExternrefType() const { /// type. 
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, unsigned NumFields) const { + auto K = llvm::ScalableVecTyKey{EltTy, NumElts, NumFields}; + if (auto It = ScalableVecTyMap.find(K); It != ScalableVecTyMap.end()) + return It->second; + if (Target->hasAArch64ACLETypes()) { uint64_t EltTySize = getTypeSize(EltTy); @@ -4575,29 +4579,29 @@ QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, if (EltTy->hasIntegerRepresentation() && !EltTy->isBooleanType() && \ EltTy->hasSignedIntegerRepresentation() == IsSigned && \ EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \ - return SingletonId; \ + return ScalableVecTyMap[K] = SingletonId; \ } #define SVE_VECTOR_TYPE_FLOAT(Name, MangledName, Id, SingletonId, NumEls, \ ElBits, NF) \ if (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \ - return SingletonId; \ + return ScalableVecTyMap[K] = SingletonId; \ } #define SVE_VECTOR_TYPE_BFLOAT(Name, MangledName, Id, SingletonId, NumEls, \ ElBits, NF) \ if (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ EltTySize == ElBits && NumElts == (NumEls * NF) && NumFields == 1) { \ - return SingletonId; \ + return ScalableVecTyMap[K] = SingletonId; \ } #define SVE_VECTOR_TYPE_MFLOAT(Name, MangledName, Id, SingletonId, NumEls, \ ElBits, NF) \ if (EltTy->isMFloat8Type() && EltTySize == ElBits && \ NumElts == (NumEls * NF) && NumFields == 1) { \ - return SingletonId; \ + return ScalableVecTyMap[K] = SingletonId; \ } #define SVE_PREDICATE_TYPE_ALL(Name, MangledName, Id, SingletonId, NumEls, NF) \ if (EltTy->isBooleanType() && NumElts == (NumEls * NF) && NumFields == 1) \ - return SingletonId; + return ScalableVecTyMap[K] = SingletonId; #include "clang/Basic/AArch64ACLETypes.def" } else if (Target->hasRISCVVTypes()) { uint64_t EltTySize = getTypeSize(EltTy); @@ -4611,10 +4615,10 @@ QualType 
ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ IsBF && !IsFP)) && \ EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \ - return SingletonId; + return ScalableVecTyMap[K] = SingletonId; #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ if (EltTy->isBooleanType() && NumElts == NumEls) \ - return SingletonId; + return ScalableVecTyMap[K] = SingletonId; #include "clang/Basic/RISCVVTypes.def" } return QualType(); @@ -5869,8 +5873,14 @@ ASTContext::getSubstBuiltinTemplatePack(const TemplateArgument &ArgPack) { QualType Canon; TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); - if (!CanonArgPack.structurallyEquals(ArgPack)) + if (!CanonArgPack.structurallyEquals(ArgPack)) { Canon = getSubstBuiltinTemplatePack(CanonArgPack); + // Refresh InsertPos, in case the recursive call above caused rehashing, + // which would invalidate the bucket pointer. + [[maybe_unused]] const auto *Nothing = + SubstBuiltinTemplatePackTypes.FindNodeOrInsertPos(ID, InsertPos); + assert(!Nothing); + } auto *PackType = new (*this, alignof(SubstBuiltinTemplatePackType)) SubstBuiltinTemplatePackType(Canon, ArgPack); diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index b4da99957ee88..0b7b6cd64dd97 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -2934,8 +2934,9 @@ bool Compiler::VisitMaterializeTemporaryExpr( // For everyhing else, use local variables. 
if (SubExprT) { bool IsConst = SubExpr->getType().isConstQualified(); - unsigned LocalIndex = - allocateLocalPrimitive(E, *SubExprT, IsConst, E->getExtendingDecl()); + bool IsVolatile = SubExpr->getType().isVolatileQualified(); + unsigned LocalIndex = allocateLocalPrimitive( + E, *SubExprT, IsConst, IsVolatile, E->getExtendingDecl()); if (!this->visit(SubExpr)) return false; if (!this->emitSetLocal(*SubExprT, LocalIndex, E)) @@ -4452,6 +4453,9 @@ bool Compiler::visitAssignment(const Expr *LHS, const Expr *RHS, if (!this->visit(LHS)) return false; + if (LHS->getType().isVolatileQualified()) + return this->emitInvalidStore(LHS->getType().getTypePtr(), E); + // We don't support assignments in C. if (!Ctx.getLangOpts().CPlusPlus && !this->emitInvalid(E)) return false; @@ -4560,13 +4564,14 @@ bool Compiler::emitConst(const APSInt &Value, const Expr *E) { template unsigned Compiler::allocateLocalPrimitive( - DeclTy &&Src, PrimType Ty, bool IsConst, const ValueDecl *ExtendingDecl, - ScopeKind SC, bool IsConstexprUnknown) { + DeclTy &&Src, PrimType Ty, bool IsConst, bool IsVolatile, + const ValueDecl *ExtendingDecl, ScopeKind SC, bool IsConstexprUnknown) { // FIXME: There are cases where Src.is() is wrong, e.g. // (int){12} in C. Consider using Expr::isTemporaryObject() instead // or isa(). 
Descriptor *D = P.createDescriptor(Src, Ty, nullptr, Descriptor::InlineDescMD, - IsConst, isa(Src)); + IsConst, isa(Src), + /*IsMutable=*/false, IsVolatile); D->IsConstexprUnknown = IsConstexprUnknown; Scope::Local Local = this->createLocal(D); if (auto *VD = dyn_cast_if_present(Src.dyn_cast())) @@ -4874,7 +4879,8 @@ Compiler::visitVarDecl(const VarDecl *VD, const Expr *Init, if (VarT) { unsigned Offset = this->allocateLocalPrimitive( - VD, *VarT, VD->getType().isConstQualified(), nullptr, ScopeKind::Block, + VD, *VarT, VD->getType().isConstQualified(), + VD->getType().isVolatileQualified(), nullptr, ScopeKind::Block, IsConstexprUnknown); if (Init) { // If this is a toplevel declaration, create a scope for the diff --git a/clang/lib/AST/ByteCode/Compiler.h b/clang/lib/AST/ByteCode/Compiler.h index 09599b3547888..5c46f75af4da3 100644 --- a/clang/lib/AST/ByteCode/Compiler.h +++ b/clang/lib/AST/ByteCode/Compiler.h @@ -327,6 +327,7 @@ class Compiler : public ConstStmtVisitor, bool>, /// Creates a local primitive value. unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsConst, + bool IsVolatile = false, const ValueDecl *ExtendingDecl = nullptr, ScopeKind SC = ScopeKind::Block, bool IsConstexprUnknown = false); diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp index 306f95c479d0f..683e916391337 100644 --- a/clang/lib/AST/ByteCode/Context.cpp +++ b/clang/lib/AST/ByteCode/Context.cpp @@ -567,9 +567,15 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) { // Assign descriptors to all parameters. // Composite objects are lowered to pointers. 
for (const ParmVarDecl *PD : FuncDecl->parameters()) { + bool IsConst = PD->getType().isConstQualified(); + bool IsVolatile = PD->getType().isVolatileQualified(); + OptPrimType T = classify(PD->getType()); PrimType PT = T.value_or(PT_Ptr); - Descriptor *Desc = P->createDescriptor(PD, PT); + Descriptor *Desc = P->createDescriptor(PD, PT, nullptr, std::nullopt, + IsConst, /*IsTemporary=*/false, + /*IsMutable=*/false, IsVolatile); + ParamDescriptors.insert({ParamOffset, {PT, Desc}}); ParamOffsets.push_back(ParamOffset); ParamOffset += align(primSize(PT)); @@ -595,9 +601,14 @@ const Function *Context::getOrCreateObjCBlock(const BlockExpr *E) { // Assign descriptors to all parameters. // Composite objects are lowered to pointers. for (const ParmVarDecl *PD : BD->parameters()) { + bool IsConst = PD->getType().isConstQualified(); + bool IsVolatile = PD->getType().isVolatileQualified(); + OptPrimType T = classify(PD->getType()); PrimType PT = T.value_or(PT_Ptr); - Descriptor *Desc = P->createDescriptor(PD, PT); + Descriptor *Desc = P->createDescriptor(PD, PT, nullptr, std::nullopt, + IsConst, /*IsTemporary=*/false, + /*IsMutable=*/false, IsVolatile); ParamDescriptors.insert({ParamOffset, {PT, Desc}}); ParamOffsets.push_back(ParamOffset); ParamOffset += align(primSize(PT)); diff --git a/clang/lib/AST/ByteCode/EvalEmitter.h b/clang/lib/AST/ByteCode/EvalEmitter.h index e81ea67adf97a..a9f87db5d7f8d 100644 --- a/clang/lib/AST/ByteCode/EvalEmitter.h +++ b/clang/lib/AST/ByteCode/EvalEmitter.h @@ -16,6 +16,7 @@ #include "EvaluationResult.h" #include "InterpState.h" #include "PrimType.h" +#include "Record.h" #include "Source.h" namespace clang { diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp index 8aaefc70e506e..21af3d6ac7f90 100644 --- a/clang/lib/AST/ByteCode/Interp.cpp +++ b/clang/lib/AST/ByteCode/Interp.cpp @@ -889,6 +889,8 @@ bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { return false; if (!CheckConst(S, OpPC, Ptr)) 
return false; + if (!CheckVolatile(S, OpPC, Ptr, AK_Assign)) + return false; if (!S.inConstantContext() && isConstexprUnknown(Ptr)) return false; return true; diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h index 7867a0669b472..bb0c4580b14a9 100644 --- a/clang/lib/AST/ByteCode/Interp.h +++ b/clang/lib/AST/ByteCode/Interp.h @@ -1730,9 +1730,8 @@ inline bool GetPtrLocal(InterpState &S, CodePtr OpPC, uint32_t I) { } inline bool GetPtrParam(InterpState &S, CodePtr OpPC, uint32_t I) { - if (S.checkingPotentialConstantExpression()) { + if (S.Current->isBottomFrame()) return false; - } S.Stk.push(S.Current->getParamPointer(I)); return true; } @@ -3344,6 +3343,18 @@ inline bool InvalidCast(InterpState &S, CodePtr OpPC, CastKind Kind, return false; } +inline bool InvalidStore(InterpState &S, CodePtr OpPC, const Type *T) { + if (S.getLangOpts().CPlusPlus) { + QualType VolatileType = QualType(T, 0).withVolatile(); + S.FFDiag(S.Current->getSource(OpPC), + diag::note_constexpr_access_volatile_type) + << AK_Assign << VolatileType; + } else { + S.FFDiag(S.Current->getSource(OpPC)); + } + return false; +} + inline bool InvalidDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR, bool InitializerFailed) { assert(DR); diff --git a/clang/lib/AST/ByteCode/InterpBlock.h b/clang/lib/AST/ByteCode/InterpBlock.h index ea9f44c38842e..9b3dadca6cc14 100644 --- a/clang/lib/AST/ByteCode/InterpBlock.h +++ b/clang/lib/AST/ByteCode/InterpBlock.h @@ -115,9 +115,10 @@ class Block final { return reinterpret_cast(this) + sizeof(Block); } - template T deref() const { + template const T &deref() const { return *reinterpret_cast(data()); } + template T &deref() { return *reinterpret_cast(data()); } /// Invokes the constructor. 
void invokeCtor() { diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp index 9076946d29657..891344d4e6ed0 100644 --- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp +++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp @@ -563,9 +563,9 @@ static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, case Builtin::BI__builtin_islessequal: return LHS <= RHS; case Builtin::BI__builtin_islessgreater: { - ComparisonCategoryResult cmp = LHS.compare(RHS); - return cmp == ComparisonCategoryResult::Less || - cmp == ComparisonCategoryResult::Greater; + ComparisonCategoryResult Cmp = LHS.compare(RHS); + return Cmp == ComparisonCategoryResult::Less || + Cmp == ComparisonCategoryResult::Greater; } case Builtin::BI__builtin_isunordered: return LHS.compare(RHS) == ComparisonCategoryResult::Unordered; @@ -583,8 +583,7 @@ static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType()); - APSInt FPClassArg = popToAPSInt(S.Stk, FPClassArgT); + APSInt FPClassArg = popToAPSInt(S, Call->getArg(1)); const Floating &F = S.Stk.pop(); int32_t Result = static_cast( @@ -655,8 +654,7 @@ static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Val = popToAPSInt(S.Stk, ArgT); + APSInt Val = popToAPSInt(S, Call->getArg(0)); if (Val == APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false)) return false; @@ -674,8 +672,7 @@ static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const Pointer &Arg = S.Stk.pop(); Val = convertBoolVectorToInt(Arg); } else { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - Val = 
popToAPSInt(S.Stk, ArgT); + Val = popToAPSInt(S, Call->getArg(0)); } pushInteger(S, Val.popcount(), Call->getType()); return true; @@ -684,8 +681,7 @@ static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, static bool interp__builtin_parity(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Val = popToAPSInt(S.Stk, ArgT); + APSInt Val = popToAPSInt(S, Call->getArg(0)); pushInteger(S, Val.popcount() % 2, Call->getType()); return true; } @@ -693,8 +689,7 @@ static bool interp__builtin_parity(InterpState &S, CodePtr OpPC, static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Val = popToAPSInt(S.Stk, ArgT); + APSInt Val = popToAPSInt(S, Call->getArg(0)); pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType()); return true; } @@ -702,8 +697,7 @@ static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC, static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Val = popToAPSInt(S.Stk, ArgT); + APSInt Val = popToAPSInt(S, Call->getArg(0)); pushInteger(S, Val.reverseBits(), Call->getType()); return true; } @@ -746,11 +740,8 @@ static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Right) { - PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType()); - PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType()); - - APSInt Amount = popToAPSInt(S.Stk, AmountT); - APSInt Value = popToAPSInt(S.Stk, ValueT); + APSInt Amount = popToAPSInt(S, Call->getArg(1)); + APSInt Value = popToAPSInt(S, 
Call->getArg(0)); APSInt Result; if (Right) @@ -767,8 +758,7 @@ static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC, static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Value = popToAPSInt(S.Stk, ArgT); + APSInt Value = popToAPSInt(S, Call->getArg(0)); uint64_t N = Value.countr_zero(); pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType()); @@ -796,8 +786,7 @@ static bool interp__builtin_move(InterpState &S, CodePtr OpPC, static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); - APSInt Arg = popToAPSInt(S.Stk, ArgT); + APSInt Arg = popToAPSInt(S, Call->getArg(0)); int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber( Arg.getZExtValue()); @@ -971,17 +960,15 @@ static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, unsigned BuiltinOp) { std::optional Fallback; - if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2) { - PrimType FallbackT = *S.getContext().classify(Call->getArg(1)); - Fallback = popToAPSInt(S.Stk, FallbackT); - } + if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2) + Fallback = popToAPSInt(S, Call->getArg(1)); + APSInt Val; if (Call->getArg(0)->getType()->isExtVectorBoolType()) { const Pointer &Arg = S.Stk.pop(); Val = convertBoolVectorToInt(Arg); } else { - PrimType ValT = *S.getContext().classify(Call->getArg(0)); - Val = popToAPSInt(S.Stk, ValT); + Val = popToAPSInt(S, Call->getArg(0)); } // When the argument is 0, the result of GCC builtins is undefined, whereas @@ -1008,17 +995,15 @@ static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID) { std::optional Fallback; - if (BuiltinID == 
Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2) { - PrimType FallbackT = *S.getContext().classify(Call->getArg(1)); - Fallback = popToAPSInt(S.Stk, FallbackT); - } + if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2) + Fallback = popToAPSInt(S, Call->getArg(1)); + APSInt Val; if (Call->getArg(0)->getType()->isExtVectorBoolType()) { const Pointer &Arg = S.Stk.pop(); Val = convertBoolVectorToInt(Arg); } else { - PrimType ValT = *S.getContext().classify(Call->getArg(0)); - Val = popToAPSInt(S.Stk, ValT); + Val = popToAPSInt(S, Call->getArg(0)); } if (Val == 0) { @@ -1036,13 +1021,10 @@ static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ReturnT = *S.getContext().classify(Call->getType()); - PrimType ValT = *S.getContext().classify(Call->getArg(0)); - const APSInt &Val = popToAPSInt(S.Stk, ValT); + const APSInt &Val = popToAPSInt(S, Call->getArg(0)); assert(Val.getActiveBits() <= 64); - INT_TYPE_SWITCH(ReturnT, - { S.Stk.push(T::from(Val.byteSwap().getZExtValue())); }); + pushInteger(S, Val.byteSwap(), Call->getType()); return true; } @@ -1057,9 +1039,8 @@ static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, return true; }; - PrimType ValT = *S.getContext().classify(Call->getArg(0)); const Pointer &Ptr = S.Stk.pop(); - const APSInt &SizeVal = popToAPSInt(S.Stk, ValT); + const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0)); // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power // of two less than or equal to the maximum inline atomic width, we know it @@ -1125,21 +1106,17 @@ static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { - PrimType ValT = *S.getContext().classify(Call->getArg(0)); - const APSInt &SizeVal = popToAPSInt(S.Stk, ValT); - - auto returnBool = [&S](bool Value) -> bool { - 
S.Stk.push(Value); - return true; - }; + const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0)); CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue()); if (Size.isPowerOfTwo()) { // Check against inlining width. unsigned InlineWidthBits = S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth(); - if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) - return returnBool(true); + if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) { + S.Stk.push(true); + return true; + } } return false; // returnBool(false); @@ -1169,8 +1146,7 @@ static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp) { - PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1)); - const APSInt &Alignment = popToAPSInt(S.Stk, AlignmentT); + const APSInt &Alignment = popToAPSInt(S, Call->getArg(1)); if (Alignment < 0 || !Alignment.isPowerOf2()) { S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment; @@ -1184,8 +1160,7 @@ static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, return false; } - // The first parameter is either an integer or a pointer (but not a function - // pointer). + // The first parameter is either an integer or a pointer. 
PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0)); if (isIntegralType(FirstArgT)) { @@ -1204,12 +1179,12 @@ static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, } return true; } - assert(FirstArgT == PT_Ptr); const Pointer &Ptr = S.Stk.pop(); + if (!Ptr.isBlockPointer()) + return false; - unsigned PtrOffset = Ptr.getByteOffset(); - PtrOffset = Ptr.getIndex(); + unsigned PtrOffset = Ptr.getIndex(); CharUnits BaseAlignment = S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl()); CharUnits PtrAlign = @@ -1326,10 +1301,8 @@ static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC, !Call->getArg(1)->getType()->isIntegerType()) return false; - PrimType ValT = *S.Ctx.classify(Call->getArg(0)); - PrimType IndexT = *S.Ctx.classify(Call->getArg(1)); - APSInt Index = popToAPSInt(S.Stk, IndexT); - APSInt Val = popToAPSInt(S.Stk, ValT); + APSInt Index = popToAPSInt(S, Call->getArg(1)); + APSInt Val = popToAPSInt(S, Call->getArg(0)); unsigned BitWidth = Val.getBitWidth(); uint64_t Shift = Index.extractBitsAsZExtValue(8, 0); diff --git a/clang/lib/AST/ByteCode/InterpFrame.cpp b/clang/lib/AST/ByteCode/InterpFrame.cpp index a3db0d7a29cfa..039acb5d72b2c 100644 --- a/clang/lib/AST/ByteCode/InterpFrame.cpp +++ b/clang/lib/AST/ByteCode/InterpFrame.cpp @@ -231,6 +231,8 @@ Pointer InterpFrame::getParamPointer(unsigned Off) { if (auto Pt = Params.find(Off); Pt != Params.end()) return Pointer(reinterpret_cast(Pt->second.get())); + assert(!isBottomFrame()); + // Allocate memory to store the parameter and the block metadata. 
const auto &Desc = Func->getParamDescriptor(Off); size_t BlockSize = sizeof(Block) + Desc.second->getAllocSize(); diff --git a/clang/lib/AST/ByteCode/InterpFrame.h b/clang/lib/AST/ByteCode/InterpFrame.h index 3cdc164e4bdda..fa9de2e1e7c6d 100644 --- a/clang/lib/AST/ByteCode/InterpFrame.h +++ b/clang/lib/AST/ByteCode/InterpFrame.h @@ -14,7 +14,8 @@ #define LLVM_CLANG_AST_INTERP_INTERPFRAME_H #include "Frame.h" -#include "Program.h" +#include "InterpBlock.h" +#include "Pointer.h" namespace clang { namespace interp { @@ -93,7 +94,7 @@ class InterpFrame final : public Frame { auto Pt = Params.find(Offset); if (Pt == Params.end()) return stackRef(Offset); - return Pointer(reinterpret_cast(Pt->second.get())).deref(); + return reinterpret_cast(Pt->second.get())->deref(); } /// Mutates a local copy of a parameter. @@ -151,7 +152,7 @@ class InterpFrame final : public Frame { /// Returns an offset to a local. template T &localRef(unsigned Offset) const { - return getLocalPointer(Offset).deref(); + return localBlock(Offset)->deref(); } /// Returns a pointer to a local's block. 
diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td index 7af2df5318106..532c4448e6f40 100644 --- a/clang/lib/AST/ByteCode/Opcodes.td +++ b/clang/lib/AST/ByteCode/Opcodes.td @@ -797,6 +797,7 @@ def SideEffect : Opcode {} def InvalidCast : Opcode { let Args = [ArgCastKind, ArgBool]; } +def InvalidStore : Opcode { let Args = [ArgTypePtr]; } def CheckPseudoDtor : Opcode {} def InvalidDeclRef : Opcode { diff --git a/clang/lib/AST/ByteCode/Pointer.cpp b/clang/lib/AST/ByteCode/Pointer.cpp index 81d4ce14f9310..663134c8696de 100644 --- a/clang/lib/AST/ByteCode/Pointer.cpp +++ b/clang/lib/AST/ByteCode/Pointer.cpp @@ -110,19 +110,21 @@ Pointer &Pointer::operator=(const Pointer &P) { StorageKind = P.StorageKind; Offset = P.Offset; - if (P.isBlockPointer()) { + switch (StorageKind) { + case Storage::Int: + Int = P.Int; + break; + case Storage::Block: BS = P.BS; if (BS.Pointee) BS.Pointee->addPointer(this); - } else if (P.isIntegralPointer()) { - Int = P.Int; - } else if (P.isFunctionPointer()) { + break; + case Storage::Fn: Fn = P.Fn; - } else if (P.isTypeidPointer()) { + break; + case Storage::Typeid: Typeid = P.Typeid; - } else { - assert(false && "Unhandled storage kind"); } return *this; } @@ -147,19 +149,21 @@ Pointer &Pointer::operator=(Pointer &&P) { StorageKind = P.StorageKind; Offset = P.Offset; - if (P.isBlockPointer()) { + switch (StorageKind) { + case Storage::Int: + Int = P.Int; + break; + case Storage::Block: BS = P.BS; if (BS.Pointee) BS.Pointee->addPointer(this); - } else if (P.isIntegralPointer()) { - Int = P.Int; - } else if (P.isFunctionPointer()) { + break; + case Storage::Fn: Fn = P.Fn; - } else if (P.isTypeidPointer()) { + break; + case Storage::Typeid: Typeid = P.Typeid; - } else { - assert(false && "Unhandled storage kind"); } return *this; } @@ -358,13 +362,17 @@ void Pointer::print(llvm::raw_ostream &OS) const { } size_t Pointer::computeOffsetForComparison() const { - if (isIntegralPointer()) - return 
asIntPointer().Value + Offset; - if (isTypeidPointer()) + switch (StorageKind) { + case Storage::Int: + return Int.Value + Offset; + case Storage::Block: + // See below. + break; + case Storage::Fn: + return Fn.getIntegerRepresentation() + Offset; + case Storage::Typeid: return reinterpret_cast(asTypeidPointer().TypePtr) + Offset; - - if (!isBlockPointer()) - return Offset; + } size_t Result = 0; Pointer P = *this; diff --git a/clang/lib/AST/ByteCode/Pointer.h b/clang/lib/AST/ByteCode/Pointer.h index bbf20801ce923..af89b66e9f875 100644 --- a/clang/lib/AST/ByteCode/Pointer.h +++ b/clang/lib/AST/ByteCode/Pointer.h @@ -56,7 +56,7 @@ struct TypeidPointer { const Type *TypeInfoType; }; -enum class Storage { Block, Int, Fn, Typeid }; +enum class Storage { Int, Block, Fn, Typeid }; /// A pointer to a memory block, live or dead. /// @@ -252,14 +252,16 @@ class Pointer { /// Checks if the pointer is null. bool isZero() const { - if (isBlockPointer()) + switch (StorageKind) { + case Storage::Int: + return Int.Value == 0 && Offset == 0; + case Storage::Block: return BS.Pointee == nullptr; - if (isFunctionPointer()) + case Storage::Fn: return Fn.isZero(); - if (isTypeidPointer()) + case Storage::Typeid: return false; - assert(isIntegralPointer()); - return Int.Value == 0 && Offset == 0; + } } /// Checks if the pointer is live. 
bool isLive() const { diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp index 97ae4a07f32aa..95de6a82a5270 100644 --- a/clang/lib/AST/ExprCXX.cpp +++ b/clang/lib/AST/ExprCXX.cpp @@ -1725,8 +1725,8 @@ SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context, return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs); } -NamedDecl *SubstNonTypeTemplateParmExpr::getParameter() const { - return cast( +NonTypeTemplateParmDecl *SubstNonTypeTemplateParmExpr::getParameter() const { + return cast( getReplacedTemplateParameterList(getAssociatedDecl())->asArray()[Index]); } diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp index 55b93e1eb4034..2ce4419940e52 100644 --- a/clang/lib/AST/OpenMPClause.cpp +++ b/clang/lib/AST/OpenMPClause.cpp @@ -1024,6 +1024,26 @@ OMPPartialClause *OMPPartialClause::CreateEmpty(const ASTContext &C) { return new (C) OMPPartialClause(); } +OMPLoopRangeClause * +OMPLoopRangeClause::Create(const ASTContext &C, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation FirstLoc, + SourceLocation CountLoc, SourceLocation EndLoc, + Expr *First, Expr *Count) { + OMPLoopRangeClause *Clause = CreateEmpty(C); + Clause->setLocStart(StartLoc); + Clause->setLParenLoc(LParenLoc); + Clause->setFirstLoc(FirstLoc); + Clause->setCountLoc(CountLoc); + Clause->setLocEnd(EndLoc); + Clause->setFirst(First); + Clause->setCount(Count); + return Clause; +} + +OMPLoopRangeClause *OMPLoopRangeClause::CreateEmpty(const ASTContext &C) { + return new (C) OMPLoopRangeClause(); +} + OMPAllocateClause *OMPAllocateClause::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, Expr *Alignment, SourceLocation ColonLoc, @@ -1964,6 +1984,21 @@ void OMPClausePrinter::VisitOMPPartialClause(OMPPartialClause *Node) { } } +void OMPClausePrinter::VisitOMPLoopRangeClause(OMPLoopRangeClause *Node) { + OS << "looprange"; + + Expr *First = Node->getFirst(); + Expr *Count = 
Node->getCount(); + + if (First && Count) { + OS << "("; + First->printPretty(OS, nullptr, Policy, 0); + OS << ","; + Count->printPretty(OS, nullptr, Policy, 0); + OS << ")"; + } +} + void OMPClausePrinter::VisitOMPAllocatorClause(OMPAllocatorClause *Node) { OS << "allocator("; Node->getAllocator()->printPretty(OS, nullptr, Policy, 0); diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp index 1f6586f95a9f8..a5b0cd3786a28 100644 --- a/clang/lib/AST/StmtOpenMP.cpp +++ b/clang/lib/AST/StmtOpenMP.cpp @@ -125,13 +125,12 @@ OMPLoopBasedDirective::tryToFindNextInnerLoop(Stmt *CurStmt, bool OMPLoopBasedDirective::doForAllLoops( Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref Callback, - llvm::function_ref + llvm::function_ref OnTransformationCallback) { CurStmt = CurStmt->IgnoreContainers(); for (unsigned Cnt = 0; Cnt < NumLoops; ++Cnt) { while (true) { - auto *Dir = - dyn_cast(CurStmt); + auto *Dir = dyn_cast(CurStmt); if (!Dir) break; @@ -371,6 +370,22 @@ OMPForDirective *OMPForDirective::Create( return Dir; } +Stmt *OMPLoopTransformationDirective::getTransformedStmt() const { + if (auto *D = dyn_cast(S)) + return D->getTransformedStmt(); + if (auto *D = dyn_cast(S)) + return D->getTransformedStmt(); + llvm_unreachable("unexpected object type"); +} + +Stmt *OMPLoopTransformationDirective::getPreInits() const { + if (auto *D = dyn_cast(S)) + return D->getPreInits(); + if (auto *D = dyn_cast(S)) + return D->getPreInits(); + llvm_unreachable("unexpected object type"); +} + Stmt *OMPCanonicalLoopNestTransformationDirective::getTransformedStmt() const { switch (getStmtClass()) { #define STMT(CLASS, PARENT) @@ -380,7 +395,7 @@ Stmt *OMPCanonicalLoopNestTransformationDirective::getTransformedStmt() const { return static_cast(this)->getTransformedStmt(); #include "clang/AST/StmtNodes.inc" default: - llvm_unreachable("Not a loop transformation"); + llvm_unreachable("Not a loop transformation for canonical loop nests"); } } 
@@ -393,7 +408,34 @@ Stmt *OMPCanonicalLoopNestTransformationDirective::getPreInits() const { return static_cast(this)->getPreInits(); #include "clang/AST/StmtNodes.inc" default: - llvm_unreachable("Not a loop transformation"); + llvm_unreachable("Not a loop transformation for canonical loop nests"); + } +} + +Stmt * +OMPCanonicalLoopSequenceTransformationDirective::getTransformedStmt() const { + switch (getStmtClass()) { +#define STMT(CLASS, PARENT) +#define ABSTRACT_STMT(CLASS) +#define OMPCANONICALLOOPSEQUENCETRANSFORMATIONDIRECTIVE(CLASS, PARENT) \ + case Stmt::CLASS##Class: \ + return static_cast(this)->getTransformedStmt(); +#include "clang/AST/StmtNodes.inc" + default: + llvm_unreachable("Not a loop transformation for canonical loop sequences"); + } +} + +Stmt *OMPCanonicalLoopSequenceTransformationDirective::getPreInits() const { + switch (getStmtClass()) { +#define STMT(CLASS, PARENT) +#define ABSTRACT_STMT(CLASS) +#define OMPCANONICALLOOPSEQUENCETRANSFORMATIONDIRECTIVE(CLASS, PARENT) \ + case Stmt::CLASS##Class: \ + return static_cast(this)->getPreInits(); +#include "clang/AST/StmtNodes.inc" + default: + llvm_unreachable("Not a loop transformation for canonical loop sequences"); } } @@ -510,6 +552,27 @@ OMPInterchangeDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses, SourceLocation(), SourceLocation(), NumLoops); } +OMPFuseDirective *OMPFuseDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + ArrayRef Clauses, unsigned NumGeneratedTopLevelLoops, + Stmt *AssociatedStmt, Stmt *TransformedStmt, Stmt *PreInits) { + + OMPFuseDirective *Dir = createDirective( + C, Clauses, AssociatedStmt, TransformedStmtOffset + 1, StartLoc, EndLoc); + Dir->setTransformedStmt(TransformedStmt); + Dir->setPreInits(PreInits); + Dir->setNumGeneratedTopLevelLoops(NumGeneratedTopLevelLoops); + return Dir; +} + +OMPFuseDirective *OMPFuseDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses) { + OMPFuseDirective *Dir = 
createEmptyDirective( + C, NumClauses, /*HasAssociatedStmt=*/true, TransformedStmtOffset + 1, + SourceLocation(), SourceLocation()); + return Dir; +} + OMPForSimdDirective * OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp index 2c9c3581a2962..586c3000f105c 100644 --- a/clang/lib/AST/StmtPrinter.cpp +++ b/clang/lib/AST/StmtPrinter.cpp @@ -795,6 +795,11 @@ void StmtPrinter::VisitOMPInterchangeDirective(OMPInterchangeDirective *Node) { PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPFuseDirective(OMPFuseDirective *Node) { + Indent() << "#pragma omp fuse"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPForDirective(OMPForDirective *Node) { Indent() << "#pragma omp for"; PrintOMPExecutableDirective(Node); diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp index 37c4d43ec0b2f..589a156a2b6ea 100644 --- a/clang/lib/AST/StmtProfile.cpp +++ b/clang/lib/AST/StmtProfile.cpp @@ -510,6 +510,13 @@ void OMPClauseProfiler::VisitOMPPartialClause(const OMPPartialClause *C) { Profiler->VisitExpr(Factor); } +void OMPClauseProfiler::VisitOMPLoopRangeClause(const OMPLoopRangeClause *C) { + if (const Expr *First = C->getFirst()) + Profiler->VisitExpr(First); + if (const Expr *Count = C->getCount()) + Profiler->VisitExpr(Count); +} + void OMPClauseProfiler::VisitOMPAllocatorClause(const OMPAllocatorClause *C) { if (C->getAllocator()) Profiler->VisitStmt(C->getAllocator()); @@ -1025,6 +1032,15 @@ void StmtProfiler::VisitOMPInterchangeDirective( VisitOMPCanonicalLoopNestTransformationDirective(S); } +void StmtProfiler::VisitOMPCanonicalLoopSequenceTransformationDirective( + const OMPCanonicalLoopSequenceTransformationDirective *S) { + VisitOMPExecutableDirective(S); +} + +void StmtProfiler::VisitOMPFuseDirective(const OMPFuseDirective *S) { + 
VisitOMPCanonicalLoopSequenceTransformationDirective(S); +} + void StmtProfiler::VisitOMPForDirective(const OMPForDirective *S) { VisitOMPLoopDirective(S); } @@ -1353,7 +1369,8 @@ void StmtProfiler::VisitExpr(const Expr *S) { } void StmtProfiler::VisitConstantExpr(const ConstantExpr *S) { - VisitExpr(S); + // Profile exactly as the sub-expression. + Visit(S->getSubExpr()); } void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) { diff --git a/clang/lib/Basic/OpenMPKinds.cpp b/clang/lib/Basic/OpenMPKinds.cpp index 387026e2d712f..64b2bff063340 100644 --- a/clang/lib/Basic/OpenMPKinds.cpp +++ b/clang/lib/Basic/OpenMPKinds.cpp @@ -282,6 +282,7 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str, case OMPC_affinity: case OMPC_when: case OMPC_append_args: + case OMPC_looprange: break; default: break; @@ -627,6 +628,7 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind, case OMPC_affinity: case OMPC_when: case OMPC_append_args: + case OMPC_looprange: break; default: break; @@ -755,9 +757,14 @@ bool clang::isOpenMPCanonicalLoopNestTransformationDirective( DKind == OMPD_interchange || DKind == OMPD_stripe; } +bool clang::isOpenMPCanonicalLoopSequenceTransformationDirective( + OpenMPDirectiveKind DKind) { + return DKind == OMPD_fuse; +} + bool clang::isOpenMPLoopTransformationDirective(OpenMPDirectiveKind DKind) { - // FIXME: There will be more cases when we implement 'fuse'. 
- return isOpenMPCanonicalLoopNestTransformationDirective(DKind); + return isOpenMPCanonicalLoopNestTransformationDirective(DKind) || + isOpenMPCanonicalLoopSequenceTransformationDirective(DKind); } bool clang::isOpenMPCombinedParallelADirective(OpenMPDirectiveKind DKind) { diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp index 72ee09d209e02..f4d7c1288cc04 100644 --- a/clang/lib/Basic/TargetInfo.cpp +++ b/clang/lib/Basic/TargetInfo.cpp @@ -18,6 +18,7 @@ #include "clang/Basic/LangOptions.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/TargetParser/TargetParser.h" #include @@ -1042,3 +1043,51 @@ void TargetInfo::copyAuxTarget(const TargetInfo *Aux) { auto *Src = static_cast(Aux); *Target = *Src; } + +std::string +TargetInfo::simplifyConstraint(StringRef Constraint, + SmallVectorImpl *OutCons) const { + std::string Result; + + for (const char *I = Constraint.begin(), *E = Constraint.end(); I < E; I++) { + switch (*I) { + default: + Result += convertConstraint(I); + break; + // Ignore these + case '*': + case '?': + case '!': + case '=': // Will see this and the following in mult-alt constraints. + case '+': + break; + case '#': // Ignore the rest of the constraint alternative. 
+ while (I + 1 != E && I[1] != ',') + I++; + break; + case '&': + case '%': + Result += *I; + while (I + 1 != E && I[1] == *I) + I++; + break; + case ',': + Result += "|"; + break; + case 'g': + Result += "imr"; + break; + case '[': { + assert(OutCons && + "Must pass output names to constraints with a symbolic name"); + unsigned Index; + bool ResolveResult = resolveSymbolicName(I, *OutCons, Index); + assert(ResolveResult && "Could not resolve symbolic name"); + (void)ResolveResult; + Result += llvm::utostr(Index); + break; + } + } + } + return Result; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 3b22c689ecd9f..bd09d78cd0eb6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1278,9 +1278,7 @@ mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e, "ScalarExprEmitter::emitPromoted unary imag"); return {}; case UO_Real: - cgf.cgm.errorNYI(e->getSourceRange(), - "ScalarExprEmitter::emitPromoted unary real"); - return {}; + return VisitRealImag(uo, promotionType); case UO_Minus: return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType); case UO_Plus: @@ -2087,9 +2085,13 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) { if (e->getType()->isVectorType() && e->getType()->castAs()->getVectorKind() == VectorKind::Generic) { - assert(!cir::MissingFeatures::vectorType()); - cgf.cgm.errorNYI(e->getSourceRange(), "vector logical not"); - return {}; + mlir::Value oper = Visit(e->getSubExpr()); + mlir::Location loc = cgf.getLoc(e->getExprLoc()); + auto operVecTy = mlir::cast(oper.getType()); + auto exprVecTy = mlir::cast(cgf.convertType(e->getType())); + mlir::Value zeroVec = builder.getNullValue(operVecTy, loc); + return cir::VecCmpOp::create(builder, loc, exprVecTy, cir::CmpOpKind::eq, + oper, zeroVec); } // Compare operand to zero. 
@@ -2132,6 +2134,9 @@ mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e, // this won't work for, e.g. an Obj-C property mlir::Value complex = cgf.emitComplexExpr(op); if (e->isGLValue() && !promotionTy.isNull()) { + promotionTy = promotionTy->isAnyComplexType() + ? promotionTy + : cgf.getContext().getComplexType(promotionTy); complex = cgf.emitPromotedValue(complex, promotionTy); } @@ -2360,4 +2365,4 @@ mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e, bool isPre) { return ScalarExprEmitter(*this, builder) .emitScalarPrePostIncDec(e, lv, kind, isPre); -} \ No newline at end of file +} diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp new file mode 100644 index 0000000000000..a4c2641fe631c --- /dev/null +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp @@ -0,0 +1,316 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Helperes to emit OpenACC clause recipes as CIR code. +// +//===----------------------------------------------------------------------===// + +#include "CIRGenOpenACCRecipe.h" + +namespace clang::CIRGen { +mlir::Block *OpenACCRecipeBuilderBase::createRecipeBlock(mlir::Region ®ion, + mlir::Type opTy, + mlir::Location loc, + size_t numBounds, + bool isInit) { + llvm::SmallVector types; + types.reserve(numBounds + 2); + types.push_back(opTy); + // The init section is the only one that doesn't have TWO copies of the + // operation-type. Copy has a to/from, and destroy has a + // 'reference'/'privatized' copy version. 
+ if (!isInit) + types.push_back(opTy); + + auto boundsTy = mlir::acc::DataBoundsType::get(&cgf.getMLIRContext()); + for (size_t i = 0; i < numBounds; ++i) + types.push_back(boundsTy); + + llvm::SmallVector locs{types.size(), loc}; + return builder.createBlock(®ion, region.end(), types, locs); +} + +mlir::Value +OpenACCRecipeBuilderBase::createBoundsLoop(mlir::Value subscriptedValue, + mlir::Value bound, + mlir::Location loc, bool inverse) { + mlir::Operation *bodyInsertLoc; + + mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy); + auto itrPtrTy = cir::PointerType::get(itrTy); + mlir::IntegerAttr itrAlign = + cgf.cgm.getSize(cgf.getContext().getTypeAlignInChars( + cgf.getContext().UnsignedLongLongTy)); + auto idxType = mlir::IndexType::get(&cgf.getMLIRContext()); + + auto doSubscriptOp = [&](mlir::Value subVal, + cir::LoadOp idxLoad) -> mlir::Value { + auto eltTy = cast(subVal.getType()).getPointee(); + + if (auto arrayTy = dyn_cast(eltTy)) + return builder.getArrayElement(loc, loc, subVal, arrayTy.getElementType(), + idxLoad.getResult(), + /*shouldDecay=*/true); + + assert(isa(eltTy)); + + auto eltLoad = cir::LoadOp::create(builder, loc, {subVal}); + + return cir::PtrStrideOp::create(builder, loc, eltLoad.getType(), eltLoad, + idxLoad.getResult()) + .getResult(); + }; + + auto forStmtBuilder = [&]() { + // get the lower and upper bound for iterating over. + auto lowerBoundVal = + mlir::acc::GetLowerboundOp::create(builder, loc, idxType, bound); + auto lbConversion = mlir::UnrealizedConversionCastOp::create( + builder, loc, itrTy, lowerBoundVal.getResult()); + auto upperBoundVal = + mlir::acc::GetUpperboundOp::create(builder, loc, idxType, bound); + auto ubConversion = mlir::UnrealizedConversionCastOp::create( + builder, loc, itrTy, upperBoundVal.getResult()); + + // Create a memory location for the iterator. 
+ auto itr = + cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "iter", itrAlign); + // Store to the iterator: either lower bound, or if inverse loop, upper + // bound. + if (inverse) { + cir::ConstantOp constOne = builder.getConstInt(loc, itrTy, 1); + + auto sub = + cir::BinOp::create(builder, loc, itrTy, cir::BinOpKind::Sub, + ubConversion.getResult(0), constOne.getResult()); + + // Upperbound is exclusive, so subtract 1. + builder.CIRBaseBuilderTy::createStore(loc, sub.getResult(), itr); + } else { + // Lowerbound is inclusive, so we can include it. + builder.CIRBaseBuilderTy::createStore(loc, lbConversion.getResult(0), + itr); + } + // Save the 'end' iterator based on whether we are inverted or not. This + // end iterator never changes, so we can just get it and convert it, so no + // need to store/load/etc. + auto endItr = inverse ? lbConversion : ubConversion; + + builder.createFor( + loc, + /*condBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto loadCur = cir::LoadOp::create(builder, loc, {itr}); + // Use 'not equal' since we are just doing an increment/decrement. + auto cmp = builder.createCompare( + loc, inverse ? cir::CmpOpKind::ge : cir::CmpOpKind::lt, + loadCur.getResult(), endItr.getResult(0)); + builder.createCondition(cmp); + }, + /*bodyBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto load = cir::LoadOp::create(builder, loc, {itr}); + + if (subscriptedValue) + subscriptedValue = doSubscriptOp(subscriptedValue, load); + bodyInsertLoc = builder.createYield(loc); + }, + /*stepBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto load = cir::LoadOp::create(builder, loc, {itr}); + auto unary = cir::UnaryOp::create(builder, loc, load.getType(), + inverse ? 
cir::UnaryOpKind::Dec + : cir::UnaryOpKind::Inc, + load.getResult()); + builder.CIRBaseBuilderTy::createStore(loc, unary.getResult(), itr); + builder.createYield(loc); + }); + }; + + cir::ScopeOp::create(builder, loc, + [&](mlir::OpBuilder &b, mlir::Location loc) { + forStmtBuilder(); + builder.createYield(loc); + }); + + // Leave the insertion point to be inside the body, so we can loop over + // these things. + builder.setInsertionPoint(bodyInsertLoc); + return subscriptedValue; +} + +mlir::acc::ReductionOperator +OpenACCRecipeBuilderBase::convertReductionOp(OpenACCReductionOperator op) { + switch (op) { + case OpenACCReductionOperator::Addition: + return mlir::acc::ReductionOperator::AccAdd; + case OpenACCReductionOperator::Multiplication: + return mlir::acc::ReductionOperator::AccMul; + case OpenACCReductionOperator::Max: + return mlir::acc::ReductionOperator::AccMax; + case OpenACCReductionOperator::Min: + return mlir::acc::ReductionOperator::AccMin; + case OpenACCReductionOperator::BitwiseAnd: + return mlir::acc::ReductionOperator::AccIand; + case OpenACCReductionOperator::BitwiseOr: + return mlir::acc::ReductionOperator::AccIor; + case OpenACCReductionOperator::BitwiseXOr: + return mlir::acc::ReductionOperator::AccXor; + case OpenACCReductionOperator::And: + return mlir::acc::ReductionOperator::AccLand; + case OpenACCReductionOperator::Or: + return mlir::acc::ReductionOperator::AccLor; + case OpenACCReductionOperator::Invalid: + llvm_unreachable("invalid reduction operator"); + } + + llvm_unreachable("invalid reduction operator"); +} + +// This function generates the 'destroy' section for a recipe. Note +// that this function is not 'insertion point' clean, in that it alters the +// insertion point to be inside of the 'destroy' section of the recipe, but +// doesn't restore it aftewards. 
+void OpenACCRecipeBuilderBase::createRecipeDestroySection( + mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp, + CharUnits alignment, QualType origType, size_t numBounds, QualType baseType, + mlir::Region &destroyRegion) { + mlir::Block *block = createRecipeBlock(destroyRegion, mainOp.getType(), loc, + numBounds, /*isInit=*/false); + builder.setInsertionPointToEnd(&destroyRegion.back()); + CIRGenFunction::LexicalScope ls(cgf, loc, block); + + mlir::Type elementTy = + mlir::cast(mainOp.getType()).getPointee(); + auto emitDestroy = [&](mlir::Value var, mlir::Type ty) { + Address addr{var, ty, alignment}; + cgf.emitDestroy(addr, origType, + cgf.getDestroyer(QualType::DK_cxx_destructor)); + }; + + if (numBounds) { + mlir::OpBuilder::InsertionGuard guardCase(builder); + // Get the range of bounds arguments, which are all but the 1st 2. 1st is + // a 'reference', 2nd is the 'private' variant we need to destroy from. + llvm::MutableArrayRef boundsRange = + block->getArguments().drop_front(2); + + mlir::Value subscriptedValue = block->getArgument(1); + for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange)) + subscriptedValue = createBoundsLoop(subscriptedValue, boundArg, loc, + /*inverse=*/true); + + emitDestroy(subscriptedValue, cgf.cgm.convertType(origType)); + } else { + // If we don't have any bounds, we can just destroy the variable directly. + // The destroy region has a signature of "original item, privatized item". + // So the 2nd item is the one that needs destroying, the former is just + // for reference and we don't really have a need for it at the moment. + emitDestroy(block->getArgument(1), elementTy); + } + + mlir::acc::YieldOp::create(builder, locEnd); +} + +// TODO: OpenACC: When we get this implemented for the reduction/firstprivate, +// this might end up re-merging with createRecipeInitCopy. For now, keep it +// separate until we're sure what everything looks like to keep this as clean +// as possible. 
+void OpenACCRecipeBuilderBase::createPrivateInitRecipe( + mlir::Location loc, mlir::Location locEnd, SourceRange exprRange, + mlir::Value mainOp, mlir::acc::PrivateRecipeOp recipe, size_t numBounds, + llvm::ArrayRef boundTypes, const VarDecl *allocaDecl, + QualType origType, const Expr *initExpr) { + assert(allocaDecl && "Required recipe variable not set?"); + CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, allocaDecl}; + + mlir::Block *block = + createRecipeBlock(recipe.getInitRegion(), mainOp.getType(), loc, + numBounds, /*isInit=*/true); + builder.setInsertionPointToEnd(&recipe.getInitRegion().back()); + CIRGenFunction::LexicalScope ls(cgf, loc, block); + + const Type *allocaPointeeType = + allocaDecl->getType()->getPointeeOrArrayElementType(); + // We are OK with no init for builtins, arrays of builtins, or pointers, + // else we should NYI so we know to go look for these. + if (cgf.getContext().getLangOpts().CPlusPlus && !allocaDecl->getInit() && + !allocaDecl->getType()->isPointerType() && + !allocaPointeeType->isBuiltinType() && + !allocaPointeeType->isPointerType()) { + // If we don't have any initialization recipe, we failed during Sema to + // initialize this correctly. If we disable the + // Sema::TentativeAnalysisScopes in SemaOpenACC::CreateInitRecipe, it'll + // emit an error to tell us. However, emitting those errors during + // production is a violation of the standard, so we cannot do them. + cgf.cgm.errorNYI(exprRange, "private default-init recipe"); + } + + if (!numBounds) { + // This is an 'easy' case, we just have to use the builtin init stuff to + // initialize this variable correctly. 
+ CIRGenFunction::AutoVarEmission tempDeclEmission = + cgf.emitAutoVarAlloca(*allocaDecl, builder.saveInsertionPoint()); + cgf.emitAutoVarInit(tempDeclEmission); + } else { + cgf.cgm.errorNYI(exprRange, "private-init with bounds"); + } + + mlir::acc::YieldOp::create(builder, locEnd); +} + +void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy( + mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp, + CIRGenFunction::AutoVarEmission tempDeclEmission, + mlir::acc::FirstprivateRecipeOp recipe, const VarDecl *varRecipe, + const VarDecl *temporary) { + mlir::Block *block = + createRecipeBlock(recipe.getCopyRegion(), mainOp.getType(), loc, + /*numBounds=*/0, /*isInit=*/false); + builder.setInsertionPointToEnd(&recipe.getCopyRegion().back()); + CIRGenFunction::LexicalScope ls(cgf, loc, block); + + mlir::BlockArgument fromArg = block->getArgument(0); + mlir::BlockArgument toArg = block->getArgument(1); + + mlir::Type elementTy = + mlir::cast(mainOp.getType()).getPointee(); + + // Set the address of the emission to be the argument, so that we initialize + // that instead of the variable in the other block. + tempDeclEmission.setAllocatedAddress( + Address{toArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)}); + tempDeclEmission.EmittedAsOffload = true; + + CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, temporary}; + cgf.setAddrOfLocalVar( + temporary, + Address{fromArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)}); + + cgf.emitAutoVarInit(tempDeclEmission); + mlir::acc::YieldOp::create(builder, locEnd); +} +// This function generates the 'combiner' section for a reduction recipe. Note +// that this function is not 'insertion point' clean, in that it alters the +// insertion point to be inside of the 'combiner' section of the recipe, but +// doesn't restore it aftewards. 
+void OpenACCRecipeBuilderBase::createReductionRecipeCombiner( + mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp, + mlir::acc::ReductionRecipeOp recipe) { + mlir::Block *block = builder.createBlock( + &recipe.getCombinerRegion(), recipe.getCombinerRegion().end(), + {mainOp.getType(), mainOp.getType()}, {loc, loc}); + builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back()); + CIRGenFunction::LexicalScope ls(cgf, loc, block); + + mlir::BlockArgument lhsArg = block->getArgument(0); + + mlir::acc::YieldOp::create(builder, locEnd, lhsArg); +} + +} // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h index 27178c9662475..978c671f9a170 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h +++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h @@ -10,6 +10,7 @@ // //===----------------------------------------------------------------------===// +#include "CIRGenCXXABI.h" #include "CIRGenFunction.h" #include "clang/AST/ASTContext.h" @@ -22,167 +23,54 @@ #include "mlir/Dialect/OpenACC/OpenACC.h" namespace clang::CIRGen { - -template class OpenACCRecipeBuilder { +class OpenACCRecipeBuilderBase { +protected: CIRGen::CIRGenFunction &cgf; CIRGen::CIRGenBuilderTy &builder; mlir::Block *createRecipeBlock(mlir::Region ®ion, mlir::Type opTy, mlir::Location loc, size_t numBounds, - bool isInit) { - llvm::SmallVector types; - types.reserve(numBounds + 2); - types.push_back(opTy); - // The init section is the only one that doesn't have TWO copies of the - // operation-type. Copy has a to/from, and destroy has a - // 'reference'/'privatized' copy version. 
- if (!isInit) - types.push_back(opTy); - - auto boundsTy = mlir::acc::DataBoundsType::get(&cgf.getMLIRContext()); - for (size_t i = 0; i < numBounds; ++i) - types.push_back(boundsTy); - - llvm::SmallVector locs{types.size(), loc}; - return builder.createBlock(®ion, region.end(), types, locs); - } + bool isInit); // Creates a loop through an 'acc.bounds', leaving the 'insertion' point to be // the inside of the loop body. Traverses LB->UB UNLESS `inverse` is set. // Returns the 'subscriptedValue' changed with the new bounds subscript. mlir::Value createBoundsLoop(mlir::Value subscriptedValue, mlir::Value bound, - mlir::Location loc, bool inverse) { - mlir::Operation *bodyInsertLoc; - - mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy); - auto itrPtrTy = cir::PointerType::get(itrTy); - mlir::IntegerAttr itrAlign = - cgf.cgm.getSize(cgf.getContext().getTypeAlignInChars( - cgf.getContext().UnsignedLongLongTy)); - auto idxType = mlir::IndexType::get(&cgf.getMLIRContext()); - - auto doSubscriptOp = [&](mlir::Value subVal, - cir::LoadOp idxLoad) -> mlir::Value { - auto eltTy = cast(subVal.getType()).getPointee(); - - if (auto arrayTy = dyn_cast(eltTy)) - return builder.getArrayElement( - loc, loc, subVal, arrayTy.getElementType(), idxLoad.getResult(), - /*shouldDecay=*/true); - - assert(isa(eltTy)); - - auto eltLoad = cir::LoadOp::create(builder, loc, {subVal}); - - return cir::PtrStrideOp::create(builder, loc, eltLoad.getType(), eltLoad, - idxLoad.getResult()) - .getResult(); - }; - - auto forStmtBuilder = [&]() { - // get the lower and upper bound for iterating over. 
- auto lowerBoundVal = - mlir::acc::GetLowerboundOp::create(builder, loc, idxType, bound); - auto lbConversion = mlir::UnrealizedConversionCastOp::create( - builder, loc, itrTy, lowerBoundVal.getResult()); - auto upperBoundVal = - mlir::acc::GetUpperboundOp::create(builder, loc, idxType, bound); - auto ubConversion = mlir::UnrealizedConversionCastOp::create( - builder, loc, itrTy, upperBoundVal.getResult()); - - // Create a memory location for the iterator. - auto itr = cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "iter", - itrAlign); - // Store to the iterator: either lower bound, or if inverse loop, upper - // bound. - if (inverse) { - cir::ConstantOp constOne = builder.getConstInt(loc, itrTy, 1); - - auto sub = - cir::BinOp::create(builder, loc, itrTy, cir::BinOpKind::Sub, - ubConversion.getResult(0), constOne.getResult()); - - // Upperbound is exclusive, so subtract 1. - builder.CIRBaseBuilderTy::createStore(loc, sub.getResult(), itr); - } else { - // Lowerbound is inclusive, so we can include it. - builder.CIRBaseBuilderTy::createStore(loc, lbConversion.getResult(0), - itr); - } - // Save the 'end' iterator based on whether we are inverted or not. This - // end iterator never changes, so we can just get it and convert it, so no - // need to store/load/etc. - auto endItr = inverse ? lbConversion : ubConversion; - - builder.createFor( - loc, - /*condBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto loadCur = cir::LoadOp::create(builder, loc, {itr}); - // Use 'not equal' since we are just doing an increment/decrement. - auto cmp = builder.createCompare( - loc, inverse ? 
cir::CmpOpKind::ge : cir::CmpOpKind::lt, - loadCur.getResult(), endItr.getResult(0)); - builder.createCondition(cmp); - }, - /*bodyBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto load = cir::LoadOp::create(builder, loc, {itr}); - - if (subscriptedValue) - subscriptedValue = doSubscriptOp(subscriptedValue, load); - bodyInsertLoc = builder.createYield(loc); - }, - /*stepBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto load = cir::LoadOp::create(builder, loc, {itr}); - auto unary = cir::UnaryOp::create(builder, loc, load.getType(), - inverse ? cir::UnaryOpKind::Dec - : cir::UnaryOpKind::Inc, - load.getResult()); - builder.CIRBaseBuilderTy::createStore(loc, unary.getResult(), itr); - builder.createYield(loc); - }); - }; - - cir::ScopeOp::create(builder, loc, - [&](mlir::OpBuilder &b, mlir::Location loc) { - forStmtBuilder(); - builder.createYield(loc); - }); + mlir::Location loc, bool inverse); + mlir::acc::ReductionOperator convertReductionOp(OpenACCReductionOperator op); + void createFirstprivateRecipeCopy( + mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp, + CIRGenFunction::AutoVarEmission tempDeclEmission, + mlir::acc::FirstprivateRecipeOp recipe, const VarDecl *varRecipe, + const VarDecl *temporary); - // Leave the insertion point to be inside the body, so we can loop over - // these things. - builder.setInsertionPoint(bodyInsertLoc); - return subscriptedValue; - } + // This function generates the 'combiner' section for a reduction recipe. Note + // that this function is not 'insertion point' clean, in that it alters the + // insertion point to be inside of the 'combiner' section of the recipe, but + // doesn't restore it afterwards. 
+ void createReductionRecipeCombiner(mlir::Location loc, mlir::Location locEnd, + mlir::Value mainOp, + mlir::acc::ReductionRecipeOp recipe); + void createPrivateInitRecipe(mlir::Location loc, mlir::Location locEnd, + SourceRange exprRange, mlir::Value mainOp, + mlir::acc::PrivateRecipeOp recipe, + size_t numBounds, + llvm::ArrayRef boundTypes, + const VarDecl *allocaDecl, QualType origType, + const Expr *initExpr); - mlir::acc::ReductionOperator convertReductionOp(OpenACCReductionOperator op) { - switch (op) { - case OpenACCReductionOperator::Addition: - return mlir::acc::ReductionOperator::AccAdd; - case OpenACCReductionOperator::Multiplication: - return mlir::acc::ReductionOperator::AccMul; - case OpenACCReductionOperator::Max: - return mlir::acc::ReductionOperator::AccMax; - case OpenACCReductionOperator::Min: - return mlir::acc::ReductionOperator::AccMin; - case OpenACCReductionOperator::BitwiseAnd: - return mlir::acc::ReductionOperator::AccIand; - case OpenACCReductionOperator::BitwiseOr: - return mlir::acc::ReductionOperator::AccIor; - case OpenACCReductionOperator::BitwiseXOr: - return mlir::acc::ReductionOperator::AccXor; - case OpenACCReductionOperator::And: - return mlir::acc::ReductionOperator::AccLand; - case OpenACCReductionOperator::Or: - return mlir::acc::ReductionOperator::AccLor; - case OpenACCReductionOperator::Invalid: - llvm_unreachable("invalid reduction operator"); - } + void createRecipeDestroySection(mlir::Location loc, mlir::Location locEnd, + mlir::Value mainOp, CharUnits alignment, + QualType origType, size_t numBounds, + QualType baseType, + mlir::Region &destroyRegion); - llvm_unreachable("invalid reduction operator"); - } + OpenACCRecipeBuilderBase(CIRGen::CIRGenFunction &cgf, + CIRGen::CIRGenBuilderTy &builder) + : cgf(cgf), builder(builder) {} +}; +template +class OpenACCRecipeBuilder : OpenACCRecipeBuilderBase { std::string getRecipeName(SourceRange loc, QualType baseType, unsigned numBounds, OpenACCReductionOperator reductionOp) { 
@@ -248,86 +136,6 @@ template class OpenACCRecipeBuilder { return recipeName; } - void createFirstprivateRecipeCopy( - mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp, - CIRGenFunction::AutoVarEmission tempDeclEmission, - mlir::acc::FirstprivateRecipeOp recipe, const VarDecl *varRecipe, - const VarDecl *temporary) { - mlir::Block *block = - createRecipeBlock(recipe.getCopyRegion(), mainOp.getType(), loc, - /*numBounds=*/0, /*isInit=*/false); - builder.setInsertionPointToEnd(&recipe.getCopyRegion().back()); - CIRGenFunction::LexicalScope ls(cgf, loc, block); - - mlir::BlockArgument fromArg = block->getArgument(0); - mlir::BlockArgument toArg = block->getArgument(1); - - mlir::Type elementTy = - mlir::cast(mainOp.getType()).getPointee(); - - // Set the address of the emission to be the argument, so that we initialize - // that instead of the variable in the other block. - tempDeclEmission.setAllocatedAddress( - Address{toArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)}); - tempDeclEmission.EmittedAsOffload = true; - - CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, temporary}; - cgf.setAddrOfLocalVar( - temporary, - Address{fromArg, elementTy, cgf.getContext().getDeclAlign(varRecipe)}); - - cgf.emitAutoVarInit(tempDeclEmission); - mlir::acc::YieldOp::create(builder, locEnd); - } - // TODO: OpenACC: When we get this implemented for the reduction/firstprivate, - // this might end up re-merging with createRecipeInitCopy. For now, keep it - // separate until we're sure what everything looks like to keep this as clean - // as possible. 
- void createPrivateInitRecipe(mlir::Location loc, mlir::Location locEnd, - SourceRange exprRange, mlir::Value mainOp, - mlir::acc::PrivateRecipeOp recipe, - size_t numBounds, - llvm::ArrayRef boundTypes, - const VarDecl *allocaDecl, QualType origType, - const Expr *initExpr) { - assert(allocaDecl && "Required recipe variable not set?"); - CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, allocaDecl}; - - mlir::Block *block = - createRecipeBlock(recipe.getInitRegion(), mainOp.getType(), loc, - numBounds, /*isInit=*/true); - builder.setInsertionPointToEnd(&recipe.getInitRegion().back()); - CIRGenFunction::LexicalScope ls(cgf, loc, block); - - const Type *allocaPointeeType = - allocaDecl->getType()->getPointeeOrArrayElementType(); - // We are OK with no init for builtins, arrays of builtins, or pointers, - // else we should NYI so we know to go look for these. - if (cgf.getContext().getLangOpts().CPlusPlus && !allocaDecl->getInit() && - !allocaDecl->getType()->isPointerType() && - !allocaPointeeType->isBuiltinType() && - !allocaPointeeType->isPointerType()) { - // If we don't have any initialization recipe, we failed during Sema to - // initialize this correctly. If we disable the - // Sema::TentativeAnalysisScopes in SemaOpenACC::CreateInitRecipe, it'll - // emit an error to tell us. However, emitting those errors during - // production is a violation of the standard, so we cannot do them. - cgf.cgm.errorNYI(exprRange, "private default-init recipe"); - } - - if (!numBounds) { - // This is an 'easy' case, we just have to use the builtin init stuff to - // initialize this variable correctly. 
- CIRGenFunction::AutoVarEmission tempDeclEmission = - cgf.emitAutoVarAlloca(*allocaDecl, builder.saveInsertionPoint()); - cgf.emitAutoVarInit(tempDeclEmission); - } else { - cgf.cgm.errorNYI(exprRange, "private-init with bounds"); - } - - mlir::acc::YieldOp::create(builder, locEnd); - } - // Create the 'init' section of the recipe, including the 'copy' section for // 'firstprivate'. Note that this function is not 'insertion point' clean, in // that it alters the insertion point to be inside of the 'destroy' section of @@ -336,6 +144,9 @@ template class OpenACCRecipeBuilder { SourceRange exprRange, mlir::Value mainOp, RecipeTy recipe, const VarDecl *varRecipe, const VarDecl *temporary) { + // TODO: OpenACC: when we get the 'pointer' variants for + // firstprivate/reduction, this probably should be removed/split into + // functions for the BuilderBase. assert(varRecipe && "Required recipe variable not set?"); CIRGenFunction::AutoVarEmission tempDeclEmission{ @@ -401,74 +212,10 @@ template class OpenACCRecipeBuilder { } } - // This function generates the 'combiner' section for a reduction recipe. Note - // that this function is not 'insertion point' clean, in that it alters the - // insertion point to be inside of the 'combiner' section of the recipe, but - // doesn't restore it aftewards. - void createReductionRecipeCombiner(mlir::Location loc, mlir::Location locEnd, - mlir::Value mainOp, - mlir::acc::ReductionRecipeOp recipe) { - mlir::Block *block = builder.createBlock( - &recipe.getCombinerRegion(), recipe.getCombinerRegion().end(), - {mainOp.getType(), mainOp.getType()}, {loc, loc}); - builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back()); - CIRGenFunction::LexicalScope ls(cgf, loc, block); - - mlir::BlockArgument lhsArg = block->getArgument(0); - - mlir::acc::YieldOp::create(builder, locEnd, lhsArg); - } - - // This function generates the 'destroy' section for a recipe. 
Note - // that this function is not 'insertion point' clean, in that it alters the - // insertion point to be inside of the 'destroy' section of the recipe, but - // doesn't restore it aftewards. - void createRecipeDestroySection(mlir::Location loc, mlir::Location locEnd, - mlir::Value mainOp, CharUnits alignment, - QualType origType, size_t numBounds, - QualType baseType, - mlir::Region &destroyRegion) { - mlir::Block *block = createRecipeBlock(destroyRegion, mainOp.getType(), loc, - numBounds, /*isInit=*/false); - builder.setInsertionPointToEnd(&destroyRegion.back()); - CIRGenFunction::LexicalScope ls(cgf, loc, block); - - mlir::Type elementTy = - mlir::cast(mainOp.getType()).getPointee(); - auto emitDestroy = [&](mlir::Value var, mlir::Type ty) { - Address addr{var, ty, alignment}; - cgf.emitDestroy(addr, origType, - cgf.getDestroyer(QualType::DK_cxx_destructor)); - }; - - if (numBounds) { - mlir::OpBuilder::InsertionGuard guardCase(builder); - // Get the range of bounds arguments, which are all but the 1st 2. 1st is - // a 'reference', 2nd is the 'private' variant we need to destroy from. - llvm::MutableArrayRef boundsRange = - block->getArguments().drop_front(2); - - mlir::Value subscriptedValue = block->getArgument(1); - for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange)) - subscriptedValue = createBoundsLoop(subscriptedValue, boundArg, loc, - /*inverse=*/true); - - emitDestroy(subscriptedValue, cgf.cgm.convertType(origType)); - } else { - // If we don't have any bounds, we can just destroy the variable directly. - // The destroy region has a signature of "original item, privatized item". - // So the 2nd item is the one that needs destroying, the former is just - // for reference and we don't really have a need for it at the moment. 
- emitDestroy(block->getArgument(1), elementTy); - } - - mlir::acc::YieldOp::create(builder, locEnd); - } - public: OpenACCRecipeBuilder(CIRGen::CIRGenFunction &cgf, CIRGen::CIRGenBuilderTy &builder) - : cgf(cgf), builder(builder) {} + : OpenACCRecipeBuilderBase(cgf, builder) {} RecipeTy getOrCreateRecipe(ASTContext &astCtx, mlir::OpBuilder::InsertPoint &insertLocation, const Expr *varRef, const VarDecl *varRecipe, diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt index c7b76e8372efc..c1f27ec8ba858 100644 --- a/clang/lib/CIR/CodeGen/CMakeLists.txt +++ b/clang/lib/CIR/CodeGen/CMakeLists.txt @@ -31,6 +31,7 @@ add_clang_library(clangCIR CIRGenModule.cpp CIRGenOpenACC.cpp CIRGenOpenACCClause.cpp + CIRGenOpenACCRecipe.cpp CIRGenRecordLayoutBuilder.cpp CIRGenStmt.cpp CIRGenStmtOpenACC.cpp diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp index 3f38741f24a60..57db20f70801b 100644 --- a/clang/lib/CodeGen/BackendUtil.cpp +++ b/clang/lib/CodeGen/BackendUtil.cpp @@ -1476,13 +1476,13 @@ void clang::EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts, } void clang::EmbedObject(llvm::Module *M, const CodeGenOptions &CGOpts, - DiagnosticsEngine &Diags) { + llvm::vfs::FileSystem &VFS, DiagnosticsEngine &Diags) { if (CGOpts.OffloadObjects.empty()) return; for (StringRef OffloadObject : CGOpts.OffloadObjects) { llvm::ErrorOr> ObjectOrErr = - llvm::MemoryBuffer::getFileOrSTDIN(OffloadObject); + VFS.getBufferForFile(OffloadObject); if (ObjectOrErr.getError()) { auto DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, "could not open '%0' for embedding"); diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp index 9106c4cd8e139..eeb0fd6412946 100644 --- a/clang/lib/CodeGen/CGAtomic.cpp +++ b/clang/lib/CodeGen/CGAtomic.cpp @@ -734,7 +734,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest, CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr, 
CGF.Builder.getInt8(1), Order, Scope, E); RMWI->setVolatile(E->isVolatile()); - llvm::Value *Result = CGF.Builder.CreateIsNotNull(RMWI, "tobool"); + llvm::Value *Result = CGF.EmitToMemory( + CGF.Builder.CreateIsNotNull(RMWI, "tobool"), E->getType()); auto *I = CGF.Builder.CreateStore(Result, Dest); CGF.addInstToCurrentSourceAtom(I, Result); return; diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp index 68080711c4ace..12c7d48e20d67 100644 --- a/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/clang/lib/CodeGen/CGDebugInfo.cpp @@ -26,7 +26,6 @@ #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" -#include "clang/AST/LambdaCapture.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/VTableBuilder.h" @@ -1904,59 +1903,46 @@ CGDebugInfo::createInlinedSubprogram(StringRef FuncName, return SP; } -llvm::StringRef -CGDebugInfo::GetLambdaCaptureName(const LambdaCapture &Capture) { - if (Capture.capturesThis()) - return CGM.getCodeGenOpts().EmitCodeView ? "__this" : "this"; - - assert(Capture.capturesVariable()); - - const ValueDecl *CaptureDecl = Capture.getCapturedVar(); - assert(CaptureDecl && "Expected valid decl for captured variable."); - - return CaptureDecl->getName(); -} - void CGDebugInfo::CollectRecordLambdaFields( const CXXRecordDecl *CXXDecl, SmallVectorImpl &elements, llvm::DIType *RecordTy) { // For C++11 Lambdas a Field will be the same as a Capture, but the Capture // has the name and the location of the variable so we should iterate over // both concurrently. 
+ const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(CXXDecl); RecordDecl::field_iterator Field = CXXDecl->field_begin(); unsigned fieldno = 0; for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(), E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) { - const LambdaCapture &Capture = *I; - const uint64_t FieldOffset = - CGM.getContext().getASTRecordLayout(CXXDecl).getFieldOffset(fieldno); - - assert(!Field->isBitField() && "lambdas don't have bitfield members!"); - - SourceLocation Loc; - uint32_t Align = 0; - - if (Capture.capturesThis()) { + const LambdaCapture &C = *I; + if (C.capturesVariable()) { + SourceLocation Loc = C.getLocation(); + assert(!Field->isBitField() && "lambdas don't have bitfield members!"); + ValueDecl *V = C.getCapturedVar(); + StringRef VName = V->getName(); + llvm::DIFile *VUnit = getOrCreateFile(Loc); + auto Align = getDeclAlignIfRequired(V, CGM.getContext()); + llvm::DIType *FieldType = createFieldType( + VName, Field->getType(), Loc, Field->getAccess(), + layout.getFieldOffset(fieldno), Align, VUnit, RecordTy, CXXDecl); + elements.push_back(FieldType); + } else if (C.capturesThis()) { // TODO: Need to handle 'this' in some way by probably renaming the // this of the lambda class and having a field member of 'this' or // by using AT_object_pointer for the function and having that be // used as 'this' for semantic references. - Loc = Field->getLocation(); - } else { - Loc = Capture.getLocation(); - - const ValueDecl *CaptureDecl = Capture.getCapturedVar(); - assert(CaptureDecl && "Expected valid decl for captured variable."); - - Align = getDeclAlignIfRequired(CaptureDecl, CGM.getContext()); + FieldDecl *f = *Field; + llvm::DIFile *VUnit = getOrCreateFile(f->getLocation()); + QualType type = f->getType(); + StringRef ThisName = + CGM.getCodeGenOpts().EmitCodeView ? 
"__this" : "this"; + llvm::DIType *fieldType = createFieldType( + ThisName, type, f->getLocation(), f->getAccess(), + layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl); + + elements.push_back(fieldType); } - - llvm::DIFile *VUnit = getOrCreateFile(Loc); - - elements.push_back(createFieldType( - GetLambdaCaptureName(Capture), Field->getType(), Loc, - Field->getAccess(), FieldOffset, Align, VUnit, RecordTy, CXXDecl)); } } diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h index 78c3eb9c5792e..f86077369a42a 100644 --- a/clang/lib/CodeGen/CGDebugInfo.h +++ b/clang/lib/CodeGen/CGDebugInfo.h @@ -397,7 +397,6 @@ class CGDebugInfo { void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile *F, SmallVectorImpl &E, llvm::DICompositeType *RecordTy); - llvm::StringRef GetLambdaCaptureName(const LambdaCapture &Capture); /// If the C++ class has vtable info then insert appropriate debug /// info entry in EltTys vector. diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index 4fa25c5d66669..f319b176513f8 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -3672,17 +3672,19 @@ Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E, // If it's an l-value, load through the appropriate subobject l-value. // Note that we have to ask E because Op might be an l-value that // this won't work for, e.g. an Obj-C property. - if (E->isGLValue()) { + if (E->isGLValue()) { if (!PromotionType.isNull()) { CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr( Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true); - if (result.first) - result.first = CGF.EmitPromotedValue(result, PromotionType).first; - return result.first; - } else { - return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc()) - .getScalarVal(); + PromotionType = PromotionType->isAnyComplexType() + ? PromotionType + : CGF.getContext().getComplexType(PromotionType); + return result.first ? 
CGF.EmitPromotedValue(result, PromotionType).first + : result.first; } + + return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc()) + .getScalarVal(); } // Otherwise, calculate and project. return CGF.EmitComplexExpr(Op, false, true).first; @@ -3715,13 +3717,16 @@ Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E, if (!PromotionType.isNull()) { CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr( Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign); - if (result.second) - result.second = CGF.EmitPromotedValue(result, PromotionType).second; - return result.second; - } else { - return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc()) - .getScalarVal(); + PromotionType = PromotionType->isAnyComplexType() + ? PromotionType + : CGF.getContext().getComplexType(PromotionType); + return result.second + ? CGF.EmitPromotedValue(result, PromotionType).second + : result.second; } + + return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc()) + .getScalarVal(); } // Otherwise, calculate and project. 
return CGF.EmitComplexExpr(Op, true, false).second; diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp index c90e1a487daf9..75bde3f72c4c2 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -1542,15 +1542,14 @@ static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc( SourceManager &SM = CGM.getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(BeginLoc); - llvm::sys::fs::UniqueID ID; - if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) { + if (CGM.getFileSystem()->exists(PLoc.getFilename())) PLoc = SM.getPresumedLoc(BeginLoc, /*UseLineDirectives=*/false); - } return std::pair(PLoc.getFilename(), PLoc.getLine()); }; - return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack, ParentName); + return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack, + *CGM.getFileSystem(), ParentName); } ConstantAddress CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) { @@ -2703,7 +2702,8 @@ llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF, } llvm::Value *CGOpenMPRuntime::emitMessageClause(CodeGenFunction &CGF, - const Expr *Message) { + const Expr *Message, + SourceLocation Loc) { if (!Message) return llvm::ConstantPointerNull::get(CGF.VoidPtrTy); return CGF.EmitScalarExpr(Message); @@ -2713,11 +2713,13 @@ llvm::Value * CGOpenMPRuntime::emitMessageClause(CodeGenFunction &CGF, const OMPMessageClause *MessageClause) { return emitMessageClause( - CGF, MessageClause ? MessageClause->getMessageString() : nullptr); + CGF, MessageClause ? MessageClause->getMessageString() : nullptr, + MessageClause ? MessageClause->getBeginLoc() : SourceLocation()); } llvm::Value * -CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity) { +CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity, + SourceLocation Loc) { // OpenMP 6.0, 10.4: "If no severity clause is specified then the effect is // as if sev-level is fatal." 
return llvm::ConstantInt::get(CGM.Int32Ty, @@ -2727,13 +2729,15 @@ CGOpenMPRuntime::emitSeverityClause(OpenMPSeverityClauseKind Severity) { llvm::Value * CGOpenMPRuntime::emitSeverityClause(const OMPSeverityClause *SeverityClause) { return emitSeverityClause(SeverityClause ? SeverityClause->getSeverityKind() - : OMPC_SEVERITY_unknown); + : OMPC_SEVERITY_unknown, + SeverityClause ? SeverityClause->getBeginLoc() + : SourceLocation()); } void CGOpenMPRuntime::emitNumThreadsClause( CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity, - const Expr *Message) { + SourceLocation SeverityLoc, const Expr *Message, + SourceLocation MessageLoc) { if (!CGF.HaveInsertPoint()) return; llvm::SmallVector Args( @@ -2745,8 +2749,8 @@ void CGOpenMPRuntime::emitNumThreadsClause( RuntimeFunction FnID = OMPRTL___kmpc_push_num_threads; if (Modifier == OMPC_NUMTHREADS_strict) { FnID = OMPRTL___kmpc_push_num_threads_strict; - Args.push_back(emitSeverityClause(Severity)); - Args.push_back(emitMessageClause(CGF, Message)); + Args.push_back(emitSeverityClause(Severity, SeverityLoc)); + Args.push_back(emitMessageClause(CGF, Message, MessageLoc)); } CGF.EmitRuntimeCall( OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), FnID), Args); @@ -12654,7 +12658,8 @@ llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF, void CGOpenMPSIMDRuntime::emitNumThreadsClause( CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity, - const Expr *Message) { + SourceLocation SeverityLoc, const Expr *Message, + SourceLocation MessageLoc) { llvm_unreachable("Not supported in SIMD-only mode"); } diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h index eb04eceee236c..ba76ba6b5f523 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntime.h +++ b/clang/lib/CodeGen/CGOpenMPRuntime.h @@ -1049,11 +1049,13 @@ class CGOpenMPRuntime { Address UB, 
Address ST); virtual llvm::Value *emitMessageClause(CodeGenFunction &CGF, - const Expr *Message); + const Expr *Message, + SourceLocation Loc); virtual llvm::Value *emitMessageClause(CodeGenFunction &CGF, const OMPMessageClause *MessageClause); - virtual llvm::Value *emitSeverityClause(OpenMPSeverityClauseKind Severity); + virtual llvm::Value *emitSeverityClause(OpenMPSeverityClauseKind Severity, + SourceLocation Loc); virtual llvm::Value * emitSeverityClause(const OMPSeverityClause *SeverityClause); @@ -1069,7 +1071,9 @@ class CGOpenMPRuntime { CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier = OMPC_NUMTHREADS_unknown, OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal, - const Expr *Message = nullptr); + SourceLocation SeverityLoc = SourceLocation(), + const Expr *Message = nullptr, + SourceLocation MessageLoc = SourceLocation()); /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. @@ -1956,7 +1960,9 @@ class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime { CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier = OMPC_NUMTHREADS_unknown, OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal, - const Expr *Message = nullptr) override; + SourceLocation SeverityLoc = SourceLocation(), + const Expr *Message = nullptr, + SourceLocation MessageLoc = SourceLocation()) override; /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp index 44a091e1b3c75..4272d8b1a1f51 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp +++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp @@ -899,10 +899,34 @@ void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF, // Nothing to do. 
} +llvm::Value *CGOpenMPRuntimeGPU::emitMessageClause(CodeGenFunction &CGF, + const Expr *Message, + SourceLocation Loc) { + CGM.getDiags().Report(Loc, diag::warn_omp_gpu_unsupported_clause) + << getOpenMPClauseName(OMPC_message); + return nullptr; +} + +llvm::Value * +CGOpenMPRuntimeGPU::emitSeverityClause(OpenMPSeverityClauseKind Severity, + SourceLocation Loc) { + CGM.getDiags().Report(Loc, diag::warn_omp_gpu_unsupported_clause) + << getOpenMPClauseName(OMPC_severity); + return nullptr; +} + void CGOpenMPRuntimeGPU::emitNumThreadsClause( CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier, OpenMPSeverityClauseKind Severity, - const Expr *Message) { + SourceLocation SeverityLoc, const Expr *Message, + SourceLocation MessageLoc) { + if (Modifier == OMPC_NUMTHREADS_strict) { + CGM.getDiags().Report(Loc, + diag::warn_omp_gpu_unsupported_modifier_for_clause) + << "strict" << getOpenMPClauseName(OMPC_num_threads); + return; + } + // Nothing to do. } diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h index 665221b7d7890..810d6aa082156 100644 --- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h +++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h @@ -162,6 +162,14 @@ class CGOpenMPRuntimeGPU : public CGOpenMPRuntime { llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override; + // Currently unsupported on the device. + llvm::Value *emitMessageClause(CodeGenFunction &CGF, const Expr *Message, + SourceLocation Loc) override; + + // Currently unsupported on the device. + virtual llvm::Value *emitSeverityClause(OpenMPSeverityClauseKind Severity, + SourceLocation Loc) override; + /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. 
@@ -169,7 +177,9 @@ class CGOpenMPRuntimeGPU : public CGOpenMPRuntime { CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier = OMPC_NUMTHREADS_unknown, OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal, - const Expr *Message = nullptr) override; + SourceLocation SeverityLoc = SourceLocation(), + const Expr *Message = nullptr, + SourceLocation MessageLoc = SourceLocation()) override; /// This function ought to emit, in the general case, a call to // the openmp runtime kmpc_push_num_teams. In NVPTX backend it is not needed diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp index 440100650c43f..92636f27fd4e5 100644 --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -234,6 +234,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef Attrs) { case Stmt::OMPInterchangeDirectiveClass: EmitOMPInterchangeDirective(cast(*S)); break; + case Stmt::OMPFuseDirectiveClass: + EmitOMPFuseDirective(cast(*S)); + break; case Stmt::OMPForDirectiveClass: EmitOMPForDirective(cast(*S)); break; @@ -2471,56 +2474,6 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { CaseRangeBlock = SavedCRBlock; } -static std::string -SimplifyConstraint(const char *Constraint, const TargetInfo &Target, - SmallVectorImpl *OutCons=nullptr) { - std::string Result; - - while (*Constraint) { - switch (*Constraint) { - default: - Result += Target.convertConstraint(Constraint); - break; - // Ignore these - case '*': - case '?': - case '!': - case '=': // Will see this and the following in mult-alt constraints. - case '+': - break; - case '#': // Ignore the rest of the constraint alternative. 
- while (Constraint[1] && Constraint[1] != ',') - Constraint++; - break; - case '&': - case '%': - Result += *Constraint; - while (Constraint[1] && Constraint[1] == *Constraint) - Constraint++; - break; - case ',': - Result += "|"; - break; - case 'g': - Result += "imr"; - break; - case '[': { - assert(OutCons && - "Must pass output names to constraints with a symbolic name"); - unsigned Index; - bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index); - assert(result && "Could not resolve symbolic name"); (void)result; - Result += llvm::utostr(Index); - break; - } - } - - Constraint++; - } - - return Result; -} - /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared /// as using a particular register add that as a constraint that will be used /// in this asm stmt. @@ -2899,8 +2852,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { // Simplify the output constraint. std::string OutputConstraint(S.getOutputConstraint(i)); - OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, - getTarget(), &OutputConstraintInfos); + OutputConstraint = getTarget().simplifyConstraint( + StringRef(OutputConstraint).substr(1), &OutputConstraintInfos); const Expr *OutExpr = S.getOutputExpr(i); OutExpr = OutExpr->IgnoreParenNoopCasts(getContext()); @@ -3062,8 +3015,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { // Simplify the input constraint. 
std::string InputConstraint(S.getInputConstraint(i)); - InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(), - &OutputConstraintInfos); + InputConstraint = + getTarget().simplifyConstraint(InputConstraint, &OutputConstraintInfos); InputConstraint = AddVariableConstraints( InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()), diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp index d72cd8fbfd608..efc06a276267a 100644 --- a/clang/lib/CodeGen/CGStmtOpenMP.cpp +++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp @@ -201,6 +201,24 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope { } else { llvm_unreachable("Unknown loop-based directive kind."); } + doEmitPreinits(PreInits); + PreCondVars.restore(CGF); + } + + void + emitPreInitStmt(CodeGenFunction &CGF, + const OMPCanonicalLoopSequenceTransformationDirective &S) { + const Stmt *PreInits; + if (const auto *Fuse = dyn_cast(&S)) { + PreInits = Fuse->getPreInits(); + } else { + llvm_unreachable( + "Unknown canonical loop sequence transform directive kind."); + } + doEmitPreinits(PreInits); + } + + void doEmitPreinits(const Stmt *PreInits) { if (PreInits) { // CompoundStmts and DeclStmts are used as lists of PreInit statements and // declarations. Since declarations must be visible in the the following @@ -222,7 +240,6 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope { CGF.EmitStmt(S); } } - PreCondVars.restore(CGF); } public: @@ -230,6 +247,11 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope { : CodeGenFunction::RunCleanupsScope(CGF) { emitPreInitStmt(CGF, S); } + OMPLoopScope(CodeGenFunction &CGF, + const OMPCanonicalLoopSequenceTransformationDirective &S) + : CodeGenFunction::RunCleanupsScope(CGF) { + emitPreInitStmt(CGF, S); + } }; class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope { @@ -1622,22 +1644,30 @@ static void emitCommonOMPParallelDirective( // if sev-level is fatal." 
OpenMPSeverityClauseKind Severity = OMPC_SEVERITY_fatal; clang::Expr *Message = nullptr; + SourceLocation SeverityLoc = SourceLocation(); + SourceLocation MessageLoc = SourceLocation(); + llvm::Function *OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( CGF, S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen); + if (const auto *NumThreadsClause = S.getSingleClause()) { CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), /*IgnoreResultAssign=*/true); Modifier = NumThreadsClause->getModifier(); - if (const auto *MessageClause = S.getSingleClause()) + if (const auto *MessageClause = S.getSingleClause()) { Message = MessageClause->getMessageString(); - if (const auto *SeverityClause = S.getSingleClause()) + MessageLoc = MessageClause->getBeginLoc(); + } + if (const auto *SeverityClause = S.getSingleClause()) { Severity = SeverityClause->getSeverityKind(); + SeverityLoc = SeverityClause->getBeginLoc(); + } CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( CGF, NumThreads, NumThreadsClause->getBeginLoc(), Modifier, Severity, - Message); + SeverityLoc, Message, MessageLoc); } if (const auto *ProcBindClause = S.getSingleClause()) { CodeGenFunction::RunCleanupsScope ProcBindScope(CGF); @@ -1921,6 +1951,15 @@ class OMPTransformDirectiveScopeRAII { CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP); CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI); } + if (const auto *Dir = + dyn_cast(S)) { + // For simplicity we reuse the loop scope similarly to what we do with + // OMPCanonicalLoopNestTransformationDirective do by being a subclass + // of OMPLoopBasedDirective. 
+ Scope = new OMPLoopScope(CGF, *Dir); + CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP); + CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI); + } } ~OMPTransformDirectiveScopeRAII() { if (!Scope) @@ -1948,8 +1987,7 @@ static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop, return; } if (SimplifiedS == NextLoop) { - if (auto *Dir = - dyn_cast(SimplifiedS)) + if (auto *Dir = dyn_cast(SimplifiedS)) SimplifiedS = Dir->getTransformedStmt(); if (const auto *CanonLoop = dyn_cast(SimplifiedS)) SimplifiedS = CanonLoop->getLoopStmt(); @@ -2944,6 +2982,12 @@ void CodeGenFunction::EmitOMPInterchangeDirective( EmitStmt(S.getTransformedStmt()); } +void CodeGenFunction::EmitOMPFuseDirective(const OMPFuseDirective &S) { + // Emit the de-sugared statement + OMPTransformDirectiveScopeRAII FuseScope(*this, &S); + EmitStmt(S.getTransformedStmt()); +} + void CodeGenFunction::EmitOMPUnrollDirective(const OMPUnrollDirective &S) { bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder; diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp index 8e3234998df2a..9286f1f25c6cc 100644 --- a/clang/lib/CodeGen/CodeGenAction.cpp +++ b/clang/lib/CodeGen/CodeGenAction.cpp @@ -1141,7 +1141,8 @@ void CodeGenAction::ExecuteAction() { TheModule->setTargetTriple(Triple(TargetOpts.Triple)); } - EmbedObject(TheModule.get(), CodeGenOpts, Diagnostics); + EmbedObject(TheModule.get(), CodeGenOpts, CI.getVirtualFileSystem(), + Diagnostics); EmbedBitcode(TheModule.get(), CodeGenOpts, *MainFile); LLVMContext &Ctx = TheModule->getContext(); diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index 727487b46054f..f0565c1de04c4 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -3861,6 +3861,7 @@ class CodeGenFunction : public CodeGenTypeCache { void EmitOMPUnrollDirective(const OMPUnrollDirective &S); void EmitOMPReverseDirective(const 
OMPReverseDirective &S); void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S); + void EmitOMPFuseDirective(const OMPFuseDirective &S); void EmitOMPForDirective(const OMPForDirective &S); void EmitOMPForSimdDirective(const OMPForSimdDirective &S); void EmitOMPScopeDirective(const OMPScopeDirective &S); diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index 0eac7c351b164..f6f7f22a09004 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -1556,7 +1556,7 @@ void CodeGenModule::Release() { EmitBackendOptionsMetadata(getCodeGenOpts()); // If there is device offloading code embed it in the host now. - EmbedObject(&getModule(), CodeGenOpts, getDiags()); + EmbedObject(&getModule(), CodeGenOpts, *getFileSystem(), getDiags()); // Set visibility from DLL storage class // We do this at the end of LLVM IR generation; after any operation @@ -8172,12 +8172,17 @@ void CodeGenModule::printPostfixForExternalizedDecl(llvm::raw_ostream &OS, // Get the UniqueID for the file containing the decl. 
llvm::sys::fs::UniqueID ID; - if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) { + auto Status = FS->status(PLoc.getFilename()); + if (!Status) { PLoc = SM.getPresumedLoc(D->getLocation(), /*UseLineDirectives=*/false); assert(PLoc.isValid() && "Source location is expected to be valid."); - if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) - SM.getDiagnostics().Report(diag::err_cannot_open_file) - << PLoc.getFilename() << EC.message(); + Status = FS->status(PLoc.getFilename()); + } + if (!Status) { + SM.getDiagnostics().Report(diag::err_cannot_open_file) + << PLoc.getFilename() << Status.getError().message(); + } else { + ID = Status->getUniqueID(); } OS << llvm::format("%x", ID.getFile()) << llvm::format("%x", ID.getDevice()) << "_" << llvm::utohexstr(Result.low(), /*LowerCase=*/true, /*Width=*/8); diff --git a/clang/lib/CodeGen/CodeGenPGO.cpp b/clang/lib/CodeGen/CodeGenPGO.cpp index 98b30e084b18b..8f095649f87ce 100644 --- a/clang/lib/CodeGen/CodeGenPGO.cpp +++ b/clang/lib/CodeGen/CodeGenPGO.cpp @@ -972,7 +972,7 @@ void PGOHash::combine(HashType Type) { if (Count && Count % NumTypesPerWord == 0) { using namespace llvm::support; uint64_t Swapped = - endian::byte_swap(Working); + endian::byte_swap(Working, llvm::endianness::little); MD5.update(llvm::ArrayRef((uint8_t *)&Swapped, sizeof(Swapped))); Working = 0; } @@ -999,7 +999,7 @@ uint64_t PGOHash::finalize() { } else { using namespace llvm::support; uint64_t Swapped = - endian::byte_swap(Working); + endian::byte_swap(Working, llvm::endianness::little); MD5.update(llvm::ArrayRef((uint8_t *)&Swapped, sizeof(Swapped))); } } diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 8b1637ced5730..adaa6b3005577 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -695,16 +695,6 @@ RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs, } } -static bool checkDebugInfoOption(const Arg *A, const ArgList 
&Args, - const Driver &D, const ToolChain &TC) { - assert(A && "Expected non-nullptr argument."); - if (TC.supportsDebugInfoOption(A)) - return true; - D.Diag(diag::warn_drv_unsupported_debug_info_opt_for_target) - << A->getAsString(Args) << TC.getTripleString(); - return false; -} - static void RenderDebugInfoCompressionArgs(const ArgList &Args, ArgStringList &CmdArgs, const Driver &D, @@ -4325,27 +4315,6 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args, Args.addLastArg(CmdArgs, options::OPT_warning_suppression_mappings_EQ); } -DwarfFissionKind tools::getDebugFissionKind(const Driver &D, - const ArgList &Args, Arg *&Arg) { - Arg = Args.getLastArg(options::OPT_gsplit_dwarf, options::OPT_gsplit_dwarf_EQ, - options::OPT_gno_split_dwarf); - if (!Arg || Arg->getOption().matches(options::OPT_gno_split_dwarf)) - return DwarfFissionKind::None; - - if (Arg->getOption().matches(options::OPT_gsplit_dwarf)) - return DwarfFissionKind::Split; - - StringRef Value = Arg->getValue(); - if (Value == "split") - return DwarfFissionKind::Split; - if (Value == "single") - return DwarfFissionKind::Single; - - D.Diag(diag::err_drv_unsupported_option_argument) - << Arg->getSpelling() << Arg->getValue(); - return DwarfFissionKind::None; -} - static void renderDwarfFormat(const Driver &D, const llvm::Triple &T, const ArgList &Args, ArgStringList &CmdArgs, unsigned DwarfVersion) { diff --git a/clang/lib/Driver/ToolChains/Clang.h b/clang/lib/Driver/ToolChains/Clang.h index 18f6c5ed06a59..c22789591e00a 100644 --- a/clang/lib/Driver/ToolChains/Clang.h +++ b/clang/lib/Driver/ToolChains/Clang.h @@ -187,12 +187,6 @@ class LLVM_LIBRARY_VISIBILITY LinkerWrapper final : public Tool { const char *LinkingOutput) const override; }; -enum class DwarfFissionKind { None, Split, Single }; - -DwarfFissionKind getDebugFissionKind(const Driver &D, - const llvm::opt::ArgList &Args, - llvm::opt::Arg *&Arg); - // Calculate the output path of the module file when compiling a module unit 
// with the `-fmodule-output` option or `-fmodule-output=` option specified. // The behavior is: diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 08cd98fd04df0..cce4f6487c0bd 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -2270,6 +2270,37 @@ unsigned tools::getDwarfVersion(const ToolChain &TC, return DwarfVersion; } +DwarfFissionKind tools::getDebugFissionKind(const Driver &D, + const ArgList &Args, Arg *&Arg) { + Arg = Args.getLastArg(options::OPT_gsplit_dwarf, options::OPT_gsplit_dwarf_EQ, + options::OPT_gno_split_dwarf); + if (!Arg || Arg->getOption().matches(options::OPT_gno_split_dwarf)) + return DwarfFissionKind::None; + + if (Arg->getOption().matches(options::OPT_gsplit_dwarf)) + return DwarfFissionKind::Split; + + StringRef Value = Arg->getValue(); + if (Value == "split") + return DwarfFissionKind::Split; + if (Value == "single") + return DwarfFissionKind::Single; + + D.Diag(diag::err_drv_unsupported_option_argument) + << Arg->getSpelling() << Arg->getValue(); + return DwarfFissionKind::None; +} + +bool tools::checkDebugInfoOption(const Arg *A, const ArgList &Args, + const Driver &D, const ToolChain &TC) { + assert(A && "Expected non-nullptr argument."); + if (TC.supportsDebugInfoOption(A)) + return true; + D.Diag(diag::warn_drv_unsupported_debug_info_opt_for_target) + << A->getAsString(Args) << TC.getTripleString(); + return false; +} + void tools::AddAssemblerKPIC(const ToolChain &ToolChain, const ArgList &Args, ArgStringList &CmdArgs) { llvm::Reloc::Model RelocationModel; @@ -3315,20 +3346,16 @@ bool tools::shouldEnableVectorizerAtOLevel(const ArgList &Args, bool isSlpVec) { void tools::handleVectorizeLoopsArgs(const ArgList &Args, ArgStringList &CmdArgs) { bool EnableVec = shouldEnableVectorizerAtOLevel(Args, false); - OptSpecifier vectorizeAliasOption = - EnableVec ? 
options::OPT_O_Group : options::OPT_fvectorize; - if (Args.hasFlag(options::OPT_fvectorize, vectorizeAliasOption, - options::OPT_fno_vectorize, EnableVec)) + if (Args.hasFlag(options::OPT_fvectorize, options::OPT_fno_vectorize, + EnableVec)) CmdArgs.push_back("-vectorize-loops"); } void tools::handleVectorizeSLPArgs(const ArgList &Args, ArgStringList &CmdArgs) { bool EnableSLPVec = shouldEnableVectorizerAtOLevel(Args, true); - OptSpecifier SLPVectAliasOption = - EnableSLPVec ? options::OPT_O_Group : options::OPT_fslp_vectorize; - if (Args.hasFlag(options::OPT_fslp_vectorize, SLPVectAliasOption, - options::OPT_fno_slp_vectorize, EnableSLPVec)) + if (Args.hasFlag(options::OPT_fslp_vectorize, options::OPT_fno_slp_vectorize, + EnableSLPVec)) CmdArgs.push_back("-vectorize-slp"); } diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp index 6fc372eb75eb7..a5394813eeb97 100644 --- a/clang/lib/Driver/ToolChains/Flang.cpp +++ b/clang/lib/Driver/ToolChains/Flang.cpp @@ -120,7 +120,11 @@ static bool shouldLoopVersion(const ArgList &Args) { return false; } -void Flang::addOtherOptions(const ArgList &Args, ArgStringList &CmdArgs) const { +void Flang::addDebugOptions(const llvm::opt::ArgList &Args, const JobAction &JA, + const InputInfo &Output, const InputInfo &Input, + llvm::opt::ArgStringList &CmdArgs) const { + const auto &TC = getToolChain(); + const Driver &D = TC.getDriver(); Args.addAllArgs(CmdArgs, {options::OPT_module_dir, options::OPT_fdebug_module_writer, options::OPT_fintrinsic_modules_path, options::OPT_pedantic, @@ -131,20 +135,60 @@ void Flang::addOtherOptions(const ArgList &Args, ArgStringList &CmdArgs) const { options::OPT_finstrument_functions}); llvm::codegenoptions::DebugInfoKind DebugInfoKind; + bool hasDwarfNArg = getDwarfNArg(Args) != nullptr; if (Args.hasArg(options::OPT_gN_Group)) { Arg *gNArg = Args.getLastArg(options::OPT_gN_Group); DebugInfoKind = debugLevelToInfoKind(*gNArg); - } else if 
(Args.hasArg(options::OPT_g_Group)) { + } else if (Args.hasArg(options::OPT_g_Flag) || hasDwarfNArg) { DebugInfoKind = llvm::codegenoptions::FullDebugInfo; } else { DebugInfoKind = llvm::codegenoptions::NoDebugInfo; } addDebugInfoKind(CmdArgs, DebugInfoKind); - if (getDwarfNArg(Args)) { + if (hasDwarfNArg) { const unsigned DwarfVersion = getDwarfVersion(getToolChain(), Args); CmdArgs.push_back( Args.MakeArgString("-dwarf-version=" + Twine(DwarfVersion))); } + if (Args.hasArg(options::OPT_gsplit_dwarf) || + Args.hasArg(options::OPT_gsplit_dwarf_EQ)) { + // FIXME: -gsplit-dwarf on AIX is currently unimplemented. + if (TC.getTriple().isOSAIX()) { + D.Diag(diag::err_drv_unsupported_opt_for_target) + << Args.getLastArg(options::OPT_gsplit_dwarf)->getSpelling() + << TC.getTriple().str(); + return; + } + if (DebugInfoKind == llvm::codegenoptions::NoDebugInfo) + return; + + Arg *SplitDWARFArg; + DwarfFissionKind DwarfFission = getDebugFissionKind(D, Args, SplitDWARFArg); + + if (DwarfFission == DwarfFissionKind::None || + !checkDebugInfoOption(SplitDWARFArg, Args, D, TC)) + return; + + if (!TC.getTriple().isOSBinFormatELF() && + !TC.getTriple().isOSBinFormatWasm() && + !TC.getTriple().isOSBinFormatCOFF()) { + D.Diag(diag::warn_drv_unsupported_debug_info_opt_for_target) + << SplitDWARFArg->getSpelling() << TC.getTriple().str(); + return; + } + + if (!isa(JA) && !isa(JA) && + isa(JA)) + return; + + const char *SplitDWARFOut = SplitDebugName(JA, Args, Input, Output); + CmdArgs.push_back("-split-dwarf-file"); + CmdArgs.push_back(SplitDWARFOut); + if (DwarfFission == DwarfFissionKind::Split) { + CmdArgs.push_back("-split-dwarf-output"); + CmdArgs.push_back(SplitDWARFOut); + } + } } void Flang::addCodegenOptions(const ArgList &Args, @@ -936,8 +980,8 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA, if (willEmitRemarks(Args)) renderRemarksOptions(Args, CmdArgs, Input); - // Add other compile options - addOtherOptions(Args, CmdArgs); + // Add debug compile options + 
addDebugOptions(Args, JA, Output, Input, CmdArgs); // Disable all warnings // TODO: Handle interactions between -w, -pedantic, -Wall, -WOption diff --git a/clang/lib/Driver/ToolChains/Flang.h b/clang/lib/Driver/ToolChains/Flang.h index 98167e1b75e15..c0837b80c032e 100644 --- a/clang/lib/Driver/ToolChains/Flang.h +++ b/clang/lib/Driver/ToolChains/Flang.h @@ -125,12 +125,16 @@ class LLVM_LIBRARY_VISIBILITY Flang : public Tool { void addCodegenOptions(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const; - /// Extract other compilation options from the driver arguments and add them + /// Extract debug compilation options from the driver arguments and add them /// to the command arguments. /// /// \param [in] Args The list of input driver arguments + /// \param [in] JA The job action + /// \param [in] Output The output information on the current file output + /// \param [in] Input The input information on the current file input /// \param [out] CmdArgs The list of output command arguments - void addOtherOptions(const llvm::opt::ArgList &Args, + void addDebugOptions(const llvm::opt::ArgList &Args, const JobAction &JA, + const InputInfo &Output, const InputInfo &Input, llvm::opt::ArgStringList &CmdArgs) const; public: diff --git a/clang/lib/Format/QualifierAlignmentFixer.cpp b/clang/lib/Format/QualifierAlignmentFixer.cpp index 441a37a4902b7..043d957611b19 100644 --- a/clang/lib/Format/QualifierAlignmentFixer.cpp +++ b/clang/lib/Format/QualifierAlignmentFixer.cpp @@ -571,7 +571,7 @@ void LeftRightQualifierAlignmentFixer::fixQualifierAlignment( for (const auto *Tok = First; Tok && Tok != Last && Tok->Next; Tok = Tok->Next) { - if (Tok->MustBreakBefore) + if (Tok->MustBreakBefore && Tok != First) break; if (Tok->is(tok::comment)) continue; diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp index 6a8286da73442..67066a104d738 100644 --- a/clang/lib/Format/TokenAnnotator.cpp +++ b/clang/lib/Format/TokenAnnotator.cpp @@ 
-833,11 +833,6 @@ class AnnotatingParser { if (Parent && Parent->is(TT_PointerOrReference)) Parent->overwriteFixedType(TT_BinaryOperator); } - // An arrow after an ObjC method expression is not a lambda arrow. - if (CurrentToken->is(TT_ObjCMethodExpr) && CurrentToken->Next && - CurrentToken->Next->is(TT_LambdaArrow)) { - CurrentToken->Next->overwriteFixedType(TT_Unknown); - } Left->MatchingParen = CurrentToken; CurrentToken->MatchingParen = Left; // FirstObjCSelectorName is set when a colon is found. This does diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp index 2c9766c9b7bc0..6948b3de1e408 100644 --- a/clang/lib/Format/UnwrappedLineParser.cpp +++ b/clang/lib/Format/UnwrappedLineParser.cpp @@ -2268,7 +2268,7 @@ bool UnwrappedLineParser::tryToParseLambda() { if (!tryToParseLambdaIntroducer()) return false; - bool SeenArrow = false; + FormatToken *Arrow = nullptr; bool InTemplateParameterList = false; while (FormatTok->isNot(tok::l_brace)) { @@ -2343,17 +2343,13 @@ bool UnwrappedLineParser::tryToParseLambda() { case tok::ellipsis: case tok::kw_true: case tok::kw_false: - if (SeenArrow || InTemplateParameterList) { + if (Arrow || InTemplateParameterList) { nextToken(); break; } return true; case tok::arrow: - // This might or might not actually be a lambda arrow (this could be an - // ObjC method invocation followed by a dereferencing arrow). We might - // reset this back to TT_Unknown in TokenAnnotator. 
- FormatTok->setFinalizedType(TT_LambdaArrow); - SeenArrow = true; + Arrow = FormatTok; nextToken(); break; case tok::kw_requires: { @@ -2375,6 +2371,9 @@ bool UnwrappedLineParser::tryToParseLambda() { FormatTok->setFinalizedType(TT_LambdaLBrace); LSquare.setFinalizedType(TT_LambdaLSquare); + if (Arrow) + Arrow->setFinalizedType(TT_LambdaArrow); + NestedLambdas.push_back(Line->SeenDecltypeAuto); parseChildBlock(); assert(!NestedLambdas.empty()); @@ -2388,11 +2387,6 @@ bool UnwrappedLineParser::tryToParseLambdaIntroducer() { const FormatToken *LeftSquare = FormatTok; nextToken(); if (Previous) { - if (Previous->Tok.getIdentifierInfo() && - !Previous->isOneOf(tok::kw_return, tok::kw_co_await, tok::kw_co_yield, - tok::kw_co_return)) { - return false; - } if (Previous->closesScope()) { // Not a potential C-style cast. if (Previous->isNot(tok::r_paren)) @@ -2402,6 +2396,13 @@ bool UnwrappedLineParser::tryToParseLambdaIntroducer() { // and `int (*)()`. if (!BeforeRParen || !BeforeRParen->isOneOf(tok::greater, tok::r_paren)) return false; + } else if (Previous->is(tok::star)) { + Previous = Previous->getPreviousNonComment(); + } + if (Previous && Previous->Tok.getIdentifierInfo() && + !Previous->isOneOf(tok::kw_return, tok::kw_co_await, tok::kw_co_yield, + tok::kw_co_return)) { + return false; } } if (LeftSquare->isCppStructuredBinding(IsCpp)) diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp index d6f3aec981336..c989ad2e5155c 100644 --- a/clang/lib/Frontend/CompilerInstance.cpp +++ b/clang/lib/Frontend/CompilerInstance.cpp @@ -503,7 +503,7 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) { // then we're the top level compiler instance and need to create one. 
if (!ModuleDepCollector && !DepOpts.ModuleDependencyOutputDir.empty()) { ModuleDepCollector = std::make_shared( - DepOpts.ModuleDependencyOutputDir); + DepOpts.ModuleDependencyOutputDir, getVirtualFileSystemPtr()); } // If there is a module dep collector, register with other dep collectors diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp index edf0a091e087c..877ab02850667 100644 --- a/clang/lib/Frontend/InitPreprocessor.cpp +++ b/clang/lib/Frontend/InitPreprocessor.cpp @@ -742,7 +742,10 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts, Builder.defineMacro("__cpp_impl_coroutine", "201902L"); Builder.defineMacro("__cpp_designated_initializers", "201707L"); Builder.defineMacro("__cpp_impl_three_way_comparison", "201907L"); - //Builder.defineMacro("__cpp_modules", "201907L"); + // Intentionally to set __cpp_modules to 1. + // See https://github.com/llvm/llvm-project/issues/71364 for details. + // Builder.defineMacro("__cpp_modules", "201907L"); + Builder.defineMacro("__cpp_modules", "1"); Builder.defineMacro("__cpp_using_enum", "201907L"); } // C++23 features. 
diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp index f28a74f5d0ae5..ae09f70ee7896 100644 --- a/clang/lib/Lex/HeaderSearch.cpp +++ b/clang/lib/Lex/HeaderSearch.cpp @@ -672,9 +672,8 @@ OptionalFileEntryRef DirectoryLookup::DoFrameworkLookup( if (getDirCharacteristic() == SrcMgr::C_User) { SmallString<1024> SystemFrameworkMarker(FrameworkName); SystemFrameworkMarker += ".system_framework"; - if (llvm::sys::fs::exists(SystemFrameworkMarker)) { + if (FileMgr.getOptionalFileRef(SystemFrameworkMarker)) CacheEntry.IsUserSpecifiedSystemFramework = true; - } } } diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp index 02f3f109b2562..04f29c83dd457 100644 --- a/clang/lib/Parse/ParseOpenMP.cpp +++ b/clang/lib/Parse/ParseOpenMP.cpp @@ -2968,6 +2968,39 @@ OMPClause *Parser::ParseOpenMPSizesClause() { OpenLoc, CloseLoc); } +OMPClause *Parser::ParseOpenMPLoopRangeClause() { + SourceLocation ClauseNameLoc = ConsumeToken(); + SourceLocation FirstLoc, CountLoc; + + BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end); + if (T.consumeOpen()) { + Diag(Tok, diag::err_expected) << tok::l_paren; + return nullptr; + } + + FirstLoc = Tok.getLocation(); + ExprResult FirstVal = ParseConstantExpression(); + if (!FirstVal.isUsable()) { + T.skipToEnd(); + return nullptr; + } + + ExpectAndConsume(tok::comma); + + CountLoc = Tok.getLocation(); + ExprResult CountVal = ParseConstantExpression(); + if (!CountVal.isUsable()) { + T.skipToEnd(); + return nullptr; + } + + T.consumeClose(); + + return Actions.OpenMP().ActOnOpenMPLoopRangeClause( + FirstVal.get(), CountVal.get(), ClauseNameLoc, T.getOpenLocation(), + FirstLoc, CountLoc, T.getCloseLocation()); +} + OMPClause *Parser::ParseOpenMPPermutationClause() { SourceLocation ClauseNameLoc, OpenLoc, CloseLoc; SmallVector ArgExprs; @@ -3473,6 +3506,9 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind, } Clause = ParseOpenMPClause(CKind, WrongDirective); 
break; + case OMPC_looprange: + Clause = ParseOpenMPLoopRangeClause(); + break; default: break; } diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp index 552c92996dc2e..a0483c3027199 100644 --- a/clang/lib/Sema/SemaExceptionSpec.cpp +++ b/clang/lib/Sema/SemaExceptionSpec.cpp @@ -1493,6 +1493,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) { case Stmt::OMPUnrollDirectiveClass: case Stmt::OMPReverseDirectiveClass: case Stmt::OMPInterchangeDirectiveClass: + case Stmt::OMPFuseDirectiveClass: case Stmt::OMPSingleDirectiveClass: case Stmt::OMPTargetDataDirectiveClass: case Stmt::OMPTargetDirectiveClass: diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp index 48e06d1dc7579..f5feed6206494 100644 --- a/clang/lib/Sema/SemaOpenMP.cpp +++ b/clang/lib/Sema/SemaOpenMP.cpp @@ -4569,6 +4569,7 @@ void SemaOpenMP::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, case OMPD_unroll: case OMPD_reverse: case OMPD_interchange: + case OMPD_fuse: case OMPD_assume: break; default: @@ -6410,6 +6411,10 @@ StmtResult SemaOpenMP::ActOnOpenMPExecutableDirective( Res = ActOnOpenMPInterchangeDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); break; + case OMPD_fuse: + Res = + ActOnOpenMPFuseDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); + break; case OMPD_for: Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); @@ -9488,7 +9493,9 @@ static bool checkOpenMPIterationSpace( // sharing attributes. VarsWithImplicitDSA.erase(LCDecl); - assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars"); + assert((isOpenMPLoopDirective(DKind) || + isOpenMPCanonicalLoopSequenceTransformationDirective(DKind)) && + "DSA for non-loop vars"); // Check test-expr. HasErrors |= ISC.checkAndSetCond(For ? 
For->getCond() : CXXFor->getCond()); @@ -9916,7 +9923,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr, unsigned NumLoops = std::max(OrderedLoopCount, NestedLoopCount); SmallVector IterSpaces(NumLoops); if (!OMPLoopBasedDirective::doForAllLoops( - AStmt->IgnoreContainers(!isOpenMPLoopTransformationDirective(DKind)), + AStmt->IgnoreContainers( + !isOpenMPCanonicalLoopNestTransformationDirective(DKind)), SupportsNonPerfectlyNested, NumLoops, [DKind, &SemaRef, &DSA, NumLoops, NestedLoopCount, CollapseLoopCountExpr, OrderedLoopCountExpr, &VarsWithImplicitDSA, @@ -9938,8 +9946,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr, } return false; }, - [&SemaRef, - &Captures](OMPCanonicalLoopNestTransformationDirective *Transform) { + [&SemaRef, &Captures](OMPLoopTransformationDirective *Transform) { Stmt *DependentPreInits = Transform->getPreInits(); if (!DependentPreInits) return; @@ -9954,7 +9961,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr, auto *D = cast(C); DeclRefExpr *Ref = buildDeclRefExpr( SemaRef, D, D->getType().getNonReferenceType(), - Transform->getBeginLoc()); + cast(Transform->getDirective()) + ->getBeginLoc()); Captures[Ref] = Ref; } } @@ -14404,10 +14412,34 @@ StmtResult SemaOpenMP::ActOnOpenMPTargetTeamsDistributeSimdDirective( getASTContext(), StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } +/// Updates OriginalInits by checking Transform against loop transformation +/// directives and appending their pre-inits if a match is found. 
+static void updatePreInits(OMPLoopTransformationDirective *Transform, + SmallVectorImpl &PreInits) { + Stmt *Dir = Transform->getDirective(); + switch (Dir->getStmtClass()) { +#define STMT(CLASS, PARENT) +#define ABSTRACT_STMT(CLASS) +#define COMMON_OMP_LOOP_TRANSFORMATION(CLASS, PARENT) \ + case Stmt::CLASS##Class: \ + appendFlattenedStmtList(PreInits, \ + static_cast(Dir)->getPreInits()); \ + break; +#define OMPCANONICALLOOPNESTTRANSFORMATIONDIRECTIVE(CLASS, PARENT) \ + COMMON_OMP_LOOP_TRANSFORMATION(CLASS, PARENT) +#define OMPCANONICALLOOPSEQUENCETRANSFORMATIONDIRECTIVE(CLASS, PARENT) \ + COMMON_OMP_LOOP_TRANSFORMATION(CLASS, PARENT) +#include "clang/AST/StmtNodes.inc" +#undef COMMON_OMP_LOOP_TRANSFORMATION + default: + llvm_unreachable("Not a loop transformation"); + } +} + bool SemaOpenMP::checkTransformableLoopNest( OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops, SmallVectorImpl &LoopHelpers, - Stmt *&Body, SmallVectorImpl> &OriginalInits) { + Stmt *&Body, SmallVectorImpl> &OriginalInits) { OriginalInits.emplace_back(); bool Result = OMPLoopBasedDirective::doForAllLoops( AStmt->IgnoreContainers(), /*TryImperfectlyNestedLoops=*/false, NumLoops, @@ -14433,29 +14465,268 @@ bool SemaOpenMP::checkTransformableLoopNest( OriginalInits.emplace_back(); return false; }, - [&OriginalInits](OMPLoopBasedDirective *Transform) { - Stmt *DependentPreInits; - if (auto *Dir = dyn_cast(Transform)) - DependentPreInits = Dir->getPreInits(); - else if (auto *Dir = dyn_cast(Transform)) - DependentPreInits = Dir->getPreInits(); - else if (auto *Dir = dyn_cast(Transform)) - DependentPreInits = Dir->getPreInits(); - else if (auto *Dir = dyn_cast(Transform)) - DependentPreInits = Dir->getPreInits(); - else if (auto *Dir = dyn_cast(Transform)) - DependentPreInits = Dir->getPreInits(); - else - llvm_unreachable("Unhandled loop transformation"); - - appendFlattenedStmtList(OriginalInits.back(), DependentPreInits); + [&OriginalInits](OMPLoopTransformationDirective *Transform) { + 
updatePreInits(Transform, OriginalInits.back()); }); assert(OriginalInits.back().empty() && "No preinit after innermost loop"); OriginalInits.pop_back(); return Result; } -/// Add preinit statements that need to be propageted from the selected loop. +/// Counts the total number of OpenMP canonical nested loops, including the +/// outermost loop (the original loop). PRECONDITION of this visitor is that it +/// must be invoked from the original loop to be analyzed. The traversal stops +/// for Decl's and Expr's given that they may contain inner loops that must not +/// be counted. +/// +/// Example AST structure for the code: +/// +/// int main() { +/// #pragma omp fuse +/// { +/// for (int i = 0; i < 100; i++) { <-- Outer loop +/// []() { +/// for(int j = 0; j < 100; j++) {} <-- NOT A LOOP (1) +/// }; +/// for(int j = 0; j < 5; ++j) {} <-- Inner loop +/// } +/// for (int r = 0; i < 100; i++) { <-- Outer loop +/// struct LocalClass { +/// void bar() { +/// for(int j = 0; j < 100; j++) {} <-- NOT A LOOP (2) +/// } +/// }; +/// for(int k = 0; k < 10; ++k) {} <-- Inner loop +/// {x = 5; for(k = 0; k < 10; ++k) x += k; x}; <-- NOT A LOOP (3) +/// } +/// } +/// } +/// (1) because in a different function (here: a lambda) +/// (2) because in a different function (here: class method) +/// (3) because considered to be intervening-code of non-perfectly nested loop +/// Result: Loop 'i' contains 2 loops, Loop 'r' also contains 2 loops. 
+class NestedLoopCounterVisitor final : public DynamicRecursiveASTVisitor { +private: + unsigned NestedLoopCount = 0; + +public: + explicit NestedLoopCounterVisitor() = default; + + unsigned getNestedLoopCount() const { return NestedLoopCount; } + + bool VisitForStmt(ForStmt *FS) override { + ++NestedLoopCount; + return true; + } + + bool VisitCXXForRangeStmt(CXXForRangeStmt *FRS) override { + ++NestedLoopCount; + return true; + } + + bool TraverseStmt(Stmt *S) override { + if (!S) + return true; + + // Skip traversal of all expressions, including special cases like + // LambdaExpr, StmtExpr, BlockExpr, and RequiresExpr. These expressions + // may contain inner statements (and even loops), but they are not part + // of the syntactic body of the surrounding loop structure. + // Therefore must not be counted. + if (isa(S)) + return true; + + // Only recurse into CompoundStmt (block {}) and loop bodies. + if (isa(S)) { + return DynamicRecursiveASTVisitor::TraverseStmt(S); + } + + // Stop traversal of the rest of statements, that break perfect + // loop nesting, such as control flow (IfStmt, SwitchStmt...). + return true; + } + + bool TraverseDecl(Decl *D) override { + // Stop in the case of finding a declaration, it is not important + // in order to find nested loops (Possible CXXRecordDecl, RecordDecl, + // FunctionDecl...). + return true; + } +}; + +bool SemaOpenMP::analyzeLoopSequence(Stmt *LoopSeqStmt, + LoopSequenceAnalysis &SeqAnalysis, + ASTContext &Context, + OpenMPDirectiveKind Kind) { + VarsWithInheritedDSAType TmpDSA; + // Helper Lambda to handle storing initialization and body statements for + // both ForStmt and CXXForRangeStmt. 
+ auto StoreLoopStatements = [](LoopAnalysis &Analysis, Stmt *LoopStmt) { + if (auto *For = dyn_cast(LoopStmt)) { + Analysis.OriginalInits.push_back(For->getInit()); + Analysis.TheForStmt = For; + } else { + auto *CXXFor = cast(LoopStmt); + Analysis.OriginalInits.push_back(CXXFor->getBeginStmt()); + Analysis.TheForStmt = CXXFor; + } + }; + + // Helper lambda functions to encapsulate the processing of different + // derivations of the canonical loop sequence grammar + // Modularized code for handling loop generation and transformations. + auto AnalyzeLoopGeneration = [&](Stmt *Child) { + auto *LoopTransform = cast(Child); + Stmt *TransformedStmt = LoopTransform->getTransformedStmt(); + unsigned NumGeneratedTopLevelLoops = + LoopTransform->getNumGeneratedTopLevelLoops(); + // Handle the case where transformed statement is not available due to + // dependent contexts + if (!TransformedStmt) { + if (NumGeneratedTopLevelLoops > 0) { + SeqAnalysis.LoopSeqSize += NumGeneratedTopLevelLoops; + return true; + } + // Unroll full (0 loops produced) + Diag(Child->getBeginLoc(), diag::err_omp_not_for) + << 0 << getOpenMPDirectiveName(Kind); + return false; + } + // Handle loop transformations with multiple loop nests + // Unroll full + if (!NumGeneratedTopLevelLoops) { + Diag(Child->getBeginLoc(), diag::err_omp_not_for) + << 0 << getOpenMPDirectiveName(Kind); + return false; + } + // Loop transformatons such as split or loopranged fuse + if (NumGeneratedTopLevelLoops > 1) { + // Get the preinits related to this loop sequence generating + // loop transformation (i.e loopranged fuse, split...) 
+ // These preinits differ slightly from regular inits/pre-inits related + // to single loop generating loop transformations (interchange, unroll) + // given that they are not bounded to a particular loop nest + // so they need to be treated independently + updatePreInits(LoopTransform, SeqAnalysis.LoopSequencePreInits); + return analyzeLoopSequence(TransformedStmt, SeqAnalysis, Context, Kind); + } + // Vast majority: (Tile, Unroll, Stripe, Reverse, Interchange, Fuse all) + // Process the transformed loop statement + LoopAnalysis &NewTransformedSingleLoop = + SeqAnalysis.Loops.emplace_back(Child); + unsigned IsCanonical = checkOpenMPLoop( + Kind, nullptr, nullptr, TransformedStmt, SemaRef, *DSAStack, TmpDSA, + NewTransformedSingleLoop.HelperExprs); + + if (!IsCanonical) + return false; + + StoreLoopStatements(NewTransformedSingleLoop, TransformedStmt); + updatePreInits(LoopTransform, NewTransformedSingleLoop.TransformsPreInits); + + SeqAnalysis.LoopSeqSize++; + return true; + }; + + // Modularized code for handling regular canonical loops. + auto AnalyzeRegularLoop = [&](Stmt *Child) { + LoopAnalysis &NewRegularLoop = SeqAnalysis.Loops.emplace_back(Child); + unsigned IsCanonical = + checkOpenMPLoop(Kind, nullptr, nullptr, Child, SemaRef, *DSAStack, + TmpDSA, NewRegularLoop.HelperExprs); + + if (!IsCanonical) + return false; + + StoreLoopStatements(NewRegularLoop, Child); + NestedLoopCounterVisitor NLCV; + NLCV.TraverseStmt(Child); + return true; + }; + + // High level grammar validation. + for (Stmt *Child : LoopSeqStmt->children()) { + if (!Child) + continue; + // Skip over non-loop-sequence statements. + if (!LoopSequenceAnalysis::isLoopSequenceDerivation(Child)) { + Child = Child->IgnoreContainers(); + // Ignore empty compound statement. + if (!Child) + continue; + // In the case of a nested loop sequence ignoring containers would not + // be enough, a recurisve transversal of the loop sequence is required. 
+ if (isa(Child)) { + if (!analyzeLoopSequence(Child, SeqAnalysis, Context, Kind)) + return false; + // Already been treated, skip this children + continue; + } + } + // Regular loop sequence handling. + if (LoopSequenceAnalysis::isLoopSequenceDerivation(Child)) { + if (LoopAnalysis::isLoopTransformation(Child)) { + if (!AnalyzeLoopGeneration(Child)) + return false; + // AnalyzeLoopGeneration updates SeqAnalysis.LoopSeqSize accordingly. + } else { + if (!AnalyzeRegularLoop(Child)) + return false; + SeqAnalysis.LoopSeqSize++; + } + } else { + // Report error for invalid statement inside canonical loop sequence. + Diag(Child->getBeginLoc(), diag::err_omp_not_for) + << 0 << getOpenMPDirectiveName(Kind); + return false; + } + } + return true; +} + +bool SemaOpenMP::checkTransformableLoopSequence( + OpenMPDirectiveKind Kind, Stmt *AStmt, LoopSequenceAnalysis &SeqAnalysis, + ASTContext &Context) { + // Following OpenMP 6.0 API Specification, a Canonical Loop Sequence follows + // the grammar: + // + // canonical-loop-sequence: + // { + // loop-sequence+ + // } + // where loop-sequence can be any of the following: + // 1. canonical-loop-sequence + // 2. loop-nest + // 3. loop-sequence-generating-construct (i.e OMPLoopTransformationDirective) + // + // To recognise and traverse this structure the helper function + // analyzeLoopSequence serves as the recurisve entry point + // and tries to match the input AST to the canonical loop sequence grammar + // structure. This function will perform both a semantic and syntactical + // analysis of the given statement according to OpenMP 6.0 definition of + // the aforementioned canonical loop sequence. + + // We expect an outer compound statement. 
+ if (!isa(AStmt)) { + Diag(AStmt->getBeginLoc(), diag::err_omp_not_a_loop_sequence) + << getOpenMPDirectiveName(Kind); + return false; + } + + // Recursive entry point to process the main loop sequence + if (!analyzeLoopSequence(AStmt, SeqAnalysis, Context, Kind)) + return false; + + // Diagnose an empty loop sequence. + if (!SeqAnalysis.LoopSeqSize) { + Diag(AStmt->getBeginLoc(), diag::err_omp_empty_loop_sequence) + << getOpenMPDirectiveName(Kind); + return false; + } + return true; +} + +/// Add preinit statements that need to be propagated from the selected loop. static void addLoopPreInits(ASTContext &Context, OMPLoopBasedDirective::HelperExprs &LoopHelper, Stmt *LoopStmt, ArrayRef OriginalInit, @@ -14540,7 +14811,7 @@ StmtResult SemaOpenMP::ActOnOpenMPTileDirective(ArrayRef Clauses, // Verify and diagnose loop nest. SmallVector LoopHelpers(NumLoops); Stmt *Body = nullptr; - SmallVector, 4> OriginalInits; + SmallVector, 4> OriginalInits; if (!checkTransformableLoopNest(OMPD_tile, AStmt, NumLoops, LoopHelpers, Body, OriginalInits)) return StmtError(); @@ -14817,7 +15088,7 @@ StmtResult SemaOpenMP::ActOnOpenMPStripeDirective(ArrayRef Clauses, // Verify and diagnose loop nest. 
SmallVector LoopHelpers(NumLoops); Stmt *Body = nullptr; - SmallVector, 4> OriginalInits; + SmallVector, 4> OriginalInits; if (!checkTransformableLoopNest(OMPD_stripe, AStmt, NumLoops, LoopHelpers, Body, OriginalInits)) return StmtError(); @@ -15078,7 +15349,7 @@ StmtResult SemaOpenMP::ActOnOpenMPUnrollDirective(ArrayRef Clauses, Stmt *Body = nullptr; SmallVector LoopHelpers( NumLoops); - SmallVector, NumLoops + 1> OriginalInits; + SmallVector, NumLoops + 1> OriginalInits; if (!checkTransformableLoopNest(OMPD_unroll, AStmt, NumLoops, LoopHelpers, Body, OriginalInits)) return StmtError(); @@ -15348,7 +15619,7 @@ StmtResult SemaOpenMP::ActOnOpenMPReverseDirective(Stmt *AStmt, Stmt *Body = nullptr; SmallVector LoopHelpers( NumLoops); - SmallVector, NumLoops + 1> OriginalInits; + SmallVector, NumLoops + 1> OriginalInits; if (!checkTransformableLoopNest(OMPD_reverse, AStmt, NumLoops, LoopHelpers, Body, OriginalInits)) return StmtError(); @@ -15540,7 +15811,7 @@ StmtResult SemaOpenMP::ActOnOpenMPInterchangeDirective( // Verify and diagnose loop nest. 
SmallVector LoopHelpers(NumLoops); Stmt *Body = nullptr; - SmallVector, 2> OriginalInits; + SmallVector, 2> OriginalInits; if (!checkTransformableLoopNest(OMPD_interchange, AStmt, NumLoops, LoopHelpers, Body, OriginalInits)) return StmtError(); @@ -15716,6 +15987,484 @@ StmtResult SemaOpenMP::ActOnOpenMPInterchangeDirective( buildPreInits(Context, PreInits)); } +StmtResult SemaOpenMP::ActOnOpenMPFuseDirective(ArrayRef Clauses, + Stmt *AStmt, + SourceLocation StartLoc, + SourceLocation EndLoc) { + + ASTContext &Context = getASTContext(); + DeclContext *CurrContext = SemaRef.CurContext; + Scope *CurScope = SemaRef.getCurScope(); + CaptureVars CopyTransformer(SemaRef); + + // Ensure the structured block is not empty + if (!AStmt) + return StmtError(); + + // Defer transformation in dependent contexts + // The NumLoopNests argument is set to a placeholder 1 (even though + // using looprange fuse could yield up to 3 top level loop nests) + // because a dependent context could prevent determining its true value + if (CurrContext->isDependentContext()) + return OMPFuseDirective::Create(Context, StartLoc, EndLoc, Clauses, + /* NumLoops */ 1, AStmt, nullptr, nullptr); + + // Validate that the potential loop sequence is transformable for fusion + // Also collect the HelperExprs, Loop Stmts, Inits, and Number of loops + LoopSequenceAnalysis SeqAnalysis; + if (!checkTransformableLoopSequence(OMPD_fuse, AStmt, SeqAnalysis, Context)) + return StmtError(); + + // SeqAnalysis.LoopSeqSize exists mostly to handle dependent contexts, + // otherwise it must be the same as SeqAnalysis.Loops.size(). 
+ assert(SeqAnalysis.LoopSeqSize == SeqAnalysis.Loops.size() && + "Inconsistent size of the loop sequence and the number of loops " + "found in the sequence"); + + // Handle clauses, which can be any of the following: [looprange, apply] + const auto *LRC = + OMPExecutableDirective::getSingleClause(Clauses); + + // The clause arguments are invalidated if any error arises + // such as non-constant or non-positive arguments + if (LRC && (!LRC->getFirst() || !LRC->getCount())) + return StmtError(); + + // Delayed semantic check of LoopRange constraint + // Evaluates the loop range arguments and returns the first and count values + auto EvaluateLoopRangeArguments = [&Context](Expr *First, Expr *Count, + uint64_t &FirstVal, + uint64_t &CountVal) { + llvm::APSInt FirstInt = First->EvaluateKnownConstInt(Context); + llvm::APSInt CountInt = Count->EvaluateKnownConstInt(Context); + FirstVal = FirstInt.getZExtValue(); + CountVal = CountInt.getZExtValue(); + }; + + // OpenMP [6.0, Restrictions] + // first + count - 1 must not evaluate to a value greater than the + // loop sequence length of the associated canonical loop sequence. + auto ValidLoopRange = [](uint64_t FirstVal, uint64_t CountVal, + unsigned NumLoops) -> bool { + return FirstVal + CountVal - 1 <= NumLoops; + }; + uint64_t FirstVal = 1, CountVal = 0, LastVal = SeqAnalysis.LoopSeqSize; + + // Validates the loop range after evaluating the semantic information + // and ensures that the range is valid for the given loop sequence size. + // Expressions are evaluated at compile time to obtain constant values. 
+ if (LRC) { + EvaluateLoopRangeArguments(LRC->getFirst(), LRC->getCount(), FirstVal, + CountVal); + if (CountVal == 1) + SemaRef.Diag(LRC->getCountLoc(), diag::warn_omp_redundant_fusion) + << getOpenMPDirectiveName(OMPD_fuse); + + if (!ValidLoopRange(FirstVal, CountVal, SeqAnalysis.LoopSeqSize)) { + SemaRef.Diag(LRC->getFirstLoc(), diag::err_omp_invalid_looprange) + << getOpenMPDirectiveName(OMPD_fuse) << FirstVal + << (FirstVal + CountVal - 1) << SeqAnalysis.LoopSeqSize; + return StmtError(); + } + + LastVal = FirstVal + CountVal - 1; + } + + // Complete fusion generates a single canonical loop nest + // However looprange clause may generate several loop nests + unsigned NumGeneratedTopLevelLoops = + LRC ? SeqAnalysis.LoopSeqSize - CountVal + 1 : 1; + + // Emit a warning for redundant loop fusion when the sequence contains only + // one loop. + if (SeqAnalysis.LoopSeqSize == 1) + SemaRef.Diag(AStmt->getBeginLoc(), diag::warn_omp_redundant_fusion) + << getOpenMPDirectiveName(OMPD_fuse); + + // Select the type with the largest bit width among all induction variables + QualType IVType = + SeqAnalysis.Loops[FirstVal - 1].HelperExprs.IterationVarRef->getType(); + for (unsigned I : llvm::seq(FirstVal, LastVal)) { + QualType CurrentIVType = + SeqAnalysis.Loops[I].HelperExprs.IterationVarRef->getType(); + if (Context.getTypeSize(CurrentIVType) > Context.getTypeSize(IVType)) { + IVType = CurrentIVType; + } + } + uint64_t IVBitWidth = Context.getIntWidth(IVType); + + // Create pre-init declarations for all loops lower bounds, upper bounds, + // strides and num-iterations for every top level loop in the fusion + SmallVector LBVarDecls; + SmallVector STVarDecls; + SmallVector NIVarDecls; + SmallVector UBVarDecls; + SmallVector IVVarDecls; + + // Helper lambda to create variables for bounds, strides, and other + // expressions. Generates both the variable declaration and the corresponding + // initialization statement. 
+ auto CreateHelperVarAndStmt = + [&, &SemaRef = SemaRef](Expr *ExprToCopy, const std::string &BaseName, + unsigned I, bool NeedsNewVD = false) { + Expr *TransformedExpr = + AssertSuccess(CopyTransformer.TransformExpr(ExprToCopy)); + if (!TransformedExpr) + return std::pair(nullptr, StmtError()); + + auto Name = (Twine(".omp.") + BaseName + std::to_string(I)).str(); + + VarDecl *VD; + if (NeedsNewVD) { + VD = buildVarDecl(SemaRef, SourceLocation(), IVType, Name); + SemaRef.AddInitializerToDecl(VD, TransformedExpr, false); + } else { + // Create a unique variable name + DeclRefExpr *DRE = cast(TransformedExpr); + VD = cast(DRE->getDecl()); + VD->setDeclName(&SemaRef.PP.getIdentifierTable().get(Name)); + } + // Create the corresponding declaration statement + StmtResult DeclStmt = new (Context) class DeclStmt( + DeclGroupRef(VD), SourceLocation(), SourceLocation()); + return std::make_pair(VD, DeclStmt); + }; + + // PreInits hold a sequence of variable declarations that must be executed + // before the fused loop begins. These include bounds, strides, and other + // helper variables required for the transformation. Other loop transforms + // also contain their own preinits + SmallVector PreInits; + + // Update the general preinits using the preinits generated by loop sequence + // generating loop transformations. These preinits differ slightly from + // single-loop transformation preinits, as they can be detached from a + // specific loop inside multiple generated loop nests. This happens + // because certain helper variables, like '.omp.fuse.max', are introduced to + // handle fused iteration spaces and may not be directly tied to a single + // original loop. The preinit structure must ensure that hidden variables + // like '.omp.fuse.max' are still properly handled. 
+ // Transformations that apply this concept: Loopranged Fuse, Split + llvm::append_range(PreInits, SeqAnalysis.LoopSequencePreInits); + + // Process each single loop to generate and collect declarations + // and statements for all helper expressions related to + // particular single loop nests + + // Also In the case of the fused loops, we keep track of their original + // inits by appending them to their preinits statement, and in the case of + // transformations, also append their preinits (which contain the original + // loop initialization statement or other statements) + + // Firstly we need to set TransformIndex to match the begining of the + // looprange section + unsigned int TransformIndex = 0; + for (unsigned I : llvm::seq(FirstVal - 1)) { + if (SeqAnalysis.Loops[I].isLoopTransformation()) + ++TransformIndex; + } + + for (unsigned int I = FirstVal - 1, J = 0; I < LastVal; ++I, ++J) { + if (SeqAnalysis.Loops[I].isRegularLoop()) { + addLoopPreInits(Context, SeqAnalysis.Loops[I].HelperExprs, + SeqAnalysis.Loops[I].TheForStmt, + SeqAnalysis.Loops[I].OriginalInits, PreInits); + } else if (SeqAnalysis.Loops[I].isLoopTransformation()) { + // For transformed loops, insert both pre-inits and original inits. + // Order matters: pre-inits may define variables used in the original + // inits such as upper bounds... 
+ SmallVector &TransformPreInit = + SeqAnalysis.Loops[TransformIndex++].TransformsPreInits; + llvm::append_range(PreInits, TransformPreInit); + + addLoopPreInits(Context, SeqAnalysis.Loops[I].HelperExprs, + SeqAnalysis.Loops[I].TheForStmt, + SeqAnalysis.Loops[I].OriginalInits, PreInits); + } + auto [UBVD, UBDStmt] = + CreateHelperVarAndStmt(SeqAnalysis.Loops[I].HelperExprs.UB, "ub", J); + auto [LBVD, LBDStmt] = + CreateHelperVarAndStmt(SeqAnalysis.Loops[I].HelperExprs.LB, "lb", J); + auto [STVD, STDStmt] = + CreateHelperVarAndStmt(SeqAnalysis.Loops[I].HelperExprs.ST, "st", J); + auto [NIVD, NIDStmt] = CreateHelperVarAndStmt( + SeqAnalysis.Loops[I].HelperExprs.NumIterations, "ni", J, true); + auto [IVVD, IVDStmt] = CreateHelperVarAndStmt( + SeqAnalysis.Loops[I].HelperExprs.IterationVarRef, "iv", J); + + assert(LBVD && STVD && NIVD && IVVD && + "OpenMP Fuse Helper variables creation failed"); + + UBVarDecls.push_back(UBVD); + LBVarDecls.push_back(LBVD); + STVarDecls.push_back(STVD); + NIVarDecls.push_back(NIVD); + IVVarDecls.push_back(IVVD); + + PreInits.push_back(LBDStmt.get()); + PreInits.push_back(STDStmt.get()); + PreInits.push_back(NIDStmt.get()); + PreInits.push_back(IVDStmt.get()); + } + + auto MakeVarDeclRef = [&SemaRef = this->SemaRef](VarDecl *VD) { + return buildDeclRefExpr(SemaRef, VD, VD->getType(), VD->getLocation(), + false); + }; + + // Following up the creation of the final fused loop will be performed + // which has the following shape (considering the selected loops): + // + // for (fuse.index = 0; fuse.index < max(ni0, ni1..., nik); ++fuse.index) { + // if (fuse.index < ni0){ + // iv0 = lb0 + st0 * fuse.index; + // original.index0 = iv0 + // body(0); + // } + // if (fuse.index < ni1){ + // iv1 = lb1 + st1 * fuse.index; + // original.index1 = iv1 + // body(1); + // } + // + // ... 
+ // + // if (fuse.index < nik){ + // ivk = lbk + stk * fuse.index; + // original.indexk = ivk + // body(k); Expr *InitVal = IntegerLiteral::Create(Context, + // llvm::APInt(IVWidth, 0), + // } + + // 1. Create the initialized fuse index + StringRef IndexName = ".omp.fuse.index"; + Expr *InitVal = IntegerLiteral::Create(Context, llvm::APInt(IVBitWidth, 0), + IVType, SourceLocation()); + VarDecl *IndexDecl = + buildVarDecl(SemaRef, {}, IVType, IndexName, nullptr, nullptr); + SemaRef.AddInitializerToDecl(IndexDecl, InitVal, false); + StmtResult InitStmt = new (Context) + DeclStmt(DeclGroupRef(IndexDecl), SourceLocation(), SourceLocation()); + + if (!InitStmt.isUsable()) + return StmtError(); + + auto MakeIVRef = [&SemaRef = this->SemaRef, IndexDecl, IVType, + Loc = InitVal->getExprLoc()]() { + return buildDeclRefExpr(SemaRef, IndexDecl, IVType, Loc, false); + }; + + // 2. Iteratively compute the max number of logical iterations Max(NI_1, NI_2, + // ..., NI_k) + // + // This loop accumulates the maximum value across multiple expressions, + // ensuring each step constructs a unique AST node for correctness. By using + // intermediate temporary variables and conditional operators, we maintain + // distinct nodes and avoid duplicating subtrees, For instance, max(a,b,c): + // omp.temp0 = max(a, b) + // omp.temp1 = max(omp.temp0, c) + // omp.fuse.max = max(omp.temp1, omp.temp0) + + ExprResult MaxExpr; + // I is the range of loops in the sequence that we fuse. 
+ for (unsigned I = FirstVal - 1, J = 0; I < LastVal; ++I, ++J) { + DeclRefExpr *NIRef = MakeVarDeclRef(NIVarDecls[J]); + QualType NITy = NIRef->getType(); + + if (MaxExpr.isUnset()) { + // Initialize MaxExpr with the first NI expression + MaxExpr = NIRef; + } else { + // Create a new acummulator variable t_i = MaxExpr + std::string TempName = (Twine(".omp.temp.") + Twine(J)).str(); + VarDecl *TempDecl = + buildVarDecl(SemaRef, {}, NITy, TempName, nullptr, nullptr); + TempDecl->setInit(MaxExpr.get()); + DeclRefExpr *TempRef = + buildDeclRefExpr(SemaRef, TempDecl, NITy, SourceLocation(), false); + DeclRefExpr *TempRef2 = + buildDeclRefExpr(SemaRef, TempDecl, NITy, SourceLocation(), false); + // Add a DeclStmt to PreInits to ensure the variable is declared. + StmtResult TempStmt = new (Context) + DeclStmt(DeclGroupRef(TempDecl), SourceLocation(), SourceLocation()); + + if (!TempStmt.isUsable()) + return StmtError(); + PreInits.push_back(TempStmt.get()); + + // Build MaxExpr <-(MaxExpr > NIRef ? MaxExpr : NIRef) + ExprResult Comparison = + SemaRef.BuildBinOp(nullptr, SourceLocation(), BO_GT, TempRef, NIRef); + // Handle any errors in Comparison creation + if (!Comparison.isUsable()) + return StmtError(); + + DeclRefExpr *NIRef2 = MakeVarDeclRef(NIVarDecls[J]); + // Update MaxExpr using a conditional expression to hold the max value + MaxExpr = new (Context) ConditionalOperator( + Comparison.get(), SourceLocation(), TempRef2, SourceLocation(), + NIRef2->getExprStmt(), NITy, VK_LValue, OK_Ordinary); + + if (!MaxExpr.isUsable()) + return StmtError(); + } + } + if (!MaxExpr.isUsable()) + return StmtError(); + + // 3. 
Declare the max variable + const std::string MaxName = Twine(".omp.fuse.max").str(); + VarDecl *MaxDecl = + buildVarDecl(SemaRef, {}, IVType, MaxName, nullptr, nullptr); + MaxDecl->setInit(MaxExpr.get()); + DeclRefExpr *MaxRef = buildDeclRefExpr(SemaRef, MaxDecl, IVType, {}, false); + StmtResult MaxStmt = new (Context) + DeclStmt(DeclGroupRef(MaxDecl), SourceLocation(), SourceLocation()); + + if (MaxStmt.isInvalid()) + return StmtError(); + PreInits.push_back(MaxStmt.get()); + + // 4. Create condition Expr: index < n_max + ExprResult CondExpr = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_LT, + MakeIVRef(), MaxRef); + if (!CondExpr.isUsable()) + return StmtError(); + + // 5. Increment Expr: ++index + ExprResult IncrExpr = + SemaRef.BuildUnaryOp(CurScope, SourceLocation(), UO_PreInc, MakeIVRef()); + if (!IncrExpr.isUsable()) + return StmtError(); + + // 6. Build the Fused Loop Body + // The final fused loop iterates over the maximum logical range. Inside the + // loop, each original loop's index is calculated dynamically, and its body + // is executed conditionally. 
+ // + // Each sub-loop's body is guarded by a conditional statement to ensure + // it executes only within its logical iteration range: + // + // if (fuse.index < ni_k){ + // iv_k = lb_k + st_k * fuse.index; + // original.index = iv_k + // body(k); + // } + + CompoundStmt *FusedBody = nullptr; + SmallVector FusedBodyStmts; + for (unsigned I = FirstVal - 1, J = 0; I < LastVal; ++I, ++J) { + // Assingment of the original sub-loop index to compute the logical index + // IV_k = LB_k + omp.fuse.index * ST_k + ExprResult IdxExpr = + SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_Mul, + MakeVarDeclRef(STVarDecls[J]), MakeIVRef()); + if (!IdxExpr.isUsable()) + return StmtError(); + IdxExpr = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_Add, + MakeVarDeclRef(LBVarDecls[J]), IdxExpr.get()); + + if (!IdxExpr.isUsable()) + return StmtError(); + IdxExpr = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_Assign, + MakeVarDeclRef(IVVarDecls[J]), IdxExpr.get()); + if (!IdxExpr.isUsable()) + return StmtError(); + + // Update the original i_k = IV_k + SmallVector BodyStmts; + BodyStmts.push_back(IdxExpr.get()); + llvm::append_range(BodyStmts, SeqAnalysis.Loops[I].HelperExprs.Updates); + + // If the loop is a CXXForRangeStmt then the iterator variable is needed + if (auto *SourceCXXFor = + dyn_cast(SeqAnalysis.Loops[I].TheForStmt)) + BodyStmts.push_back(SourceCXXFor->getLoopVarStmt()); + + Stmt *Body = + (isa(SeqAnalysis.Loops[I].TheForStmt)) + ? 
cast(SeqAnalysis.Loops[I].TheForStmt)->getBody() + : cast(SeqAnalysis.Loops[I].TheForStmt)->getBody(); + BodyStmts.push_back(Body); + + CompoundStmt *CombinedBody = + CompoundStmt::Create(Context, BodyStmts, FPOptionsOverride(), + SourceLocation(), SourceLocation()); + ExprResult Condition = + SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_LT, MakeIVRef(), + MakeVarDeclRef(NIVarDecls[J])); + + if (!Condition.isUsable()) + return StmtError(); + + IfStmt *IfStatement = IfStmt::Create( + Context, SourceLocation(), IfStatementKind::Ordinary, nullptr, nullptr, + Condition.get(), SourceLocation(), SourceLocation(), CombinedBody, + SourceLocation(), nullptr); + + FusedBodyStmts.push_back(IfStatement); + } + FusedBody = CompoundStmt::Create(Context, FusedBodyStmts, FPOptionsOverride(), + SourceLocation(), SourceLocation()); + + // 7. Construct the final fused loop + ForStmt *FusedForStmt = new (Context) + ForStmt(Context, InitStmt.get(), CondExpr.get(), nullptr, IncrExpr.get(), + FusedBody, InitStmt.get()->getBeginLoc(), SourceLocation(), + IncrExpr.get()->getEndLoc()); + + // In the case of looprange, the result of fuse won't simply + // be a single loop (ForStmt), but rather a loop sequence + // (CompoundStmt) of 3 parts: the pre-fusion loops, the fused loop + // and the post-fusion loops, preserving its original order. + // + // Note: If looprange clause produces a single fused loop nest then + // this compound statement wrapper is unnecessary (Therefore this + // treatment is skipped) + + Stmt *FusionStmt = FusedForStmt; + if (LRC && CountVal != SeqAnalysis.LoopSeqSize) { + SmallVector FinalLoops; + + // Reset the transform index + TransformIndex = 0; + + // Collect all non-fused loops before and after the fused region. + // Pre-fusion and post-fusion loops are inserted in order exploiting their + // symmetry, along with their corresponding transformation pre-inits if + // needed. The fused loop is added between the two regions. 
+ for (unsigned I : llvm::seq(SeqAnalysis.LoopSeqSize)) { + if (I >= FirstVal - 1 && I < FirstVal + CountVal - 1) { + // Update the Transformation counter to skip already treated + // loop transformations + if (!SeqAnalysis.Loops[I].isLoopTransformation()) + ++TransformIndex; + continue; + } + + // No need to handle: + // Regular loops: they are kept intact as-is. + // Loop-sequence-generating transformations: already handled earlier. + // Only TransformSingleLoop requires inserting pre-inits here + if (SeqAnalysis.Loops[I].isRegularLoop()) { + const auto &TransformPreInit = + SeqAnalysis.Loops[TransformIndex++].TransformsPreInits; + if (!TransformPreInit.empty()) + llvm::append_range(PreInits, TransformPreInit); + } + + FinalLoops.push_back(SeqAnalysis.Loops[I].TheForStmt); + } + + FinalLoops.insert(FinalLoops.begin() + (FirstVal - 1), FusedForStmt); + FusionStmt = CompoundStmt::Create(Context, FinalLoops, FPOptionsOverride(), + SourceLocation(), SourceLocation()); + } + return OMPFuseDirective::Create(Context, StartLoc, EndLoc, Clauses, + NumGeneratedTopLevelLoops, AStmt, FusionStmt, + buildPreInits(Context, PreInits)); +} + OMPClause *SemaOpenMP::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, @@ -16887,6 +17636,31 @@ OMPClause *SemaOpenMP::ActOnOpenMPPartialClause(Expr *FactorExpr, FactorExpr); } +OMPClause *SemaOpenMP::ActOnOpenMPLoopRangeClause( + Expr *First, Expr *Count, SourceLocation StartLoc, SourceLocation LParenLoc, + SourceLocation FirstLoc, SourceLocation CountLoc, SourceLocation EndLoc) { + + // OpenMP [6.0, Restrictions] + // First and Count must be integer expressions with positive value + ExprResult FirstVal = + VerifyPositiveIntegerConstantInClause(First, OMPC_looprange); + if (FirstVal.isInvalid()) + First = nullptr; + + ExprResult CountVal = + VerifyPositiveIntegerConstantInClause(Count, OMPC_looprange); + if (CountVal.isInvalid()) + Count = nullptr; + + // OpenMP [6.0, Restrictions] + // first + count - 
1 must not evaluate to a value greater than the + // loop sequence length of the associated canonical loop sequence. + // This check must be performed afterwards due to the delayed + // parsing and computation of the associated loop sequence + return OMPLoopRangeClause::Create(getASTContext(), StartLoc, LParenLoc, + FirstLoc, CountLoc, EndLoc, First, Count); +} + OMPClause *SemaOpenMP::ActOnOpenMPAlignClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp index f051a246f954f..2bf1511c5cfa0 100644 --- a/clang/lib/Sema/SemaTemplate.cpp +++ b/clang/lib/Sema/SemaTemplate.cpp @@ -775,6 +775,40 @@ Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS, TemplateArgs); } +ExprResult Sema::BuildSubstNonTypeTemplateParmExpr( + Decl *AssociatedDecl, const NonTypeTemplateParmDecl *NTTP, + SourceLocation Loc, TemplateArgument Arg, UnsignedOrNone PackIndex, + bool Final) { + // The template argument itself might be an expression, in which case we just + // return that expression. This happens when substituting into an alias + // template. + Expr *Replacement; + bool refParam = true; + if (Arg.getKind() == TemplateArgument::Expression) { + Replacement = Arg.getAsExpr(); + refParam = Replacement->isLValue(); + if (refParam && Replacement->getType()->isRecordType()) { + QualType ParamType = + NTTP->isExpandedParameterPack() + ? 
NTTP->getExpansionType(*SemaRef.ArgPackSubstIndex) + : NTTP->getType(); + if (const auto *PET = dyn_cast(ParamType)) + ParamType = PET->getPattern(); + refParam = ParamType->isReferenceType(); + } + } else { + ExprResult result = + SemaRef.BuildExpressionFromNonTypeTemplateArgument(Arg, Loc); + if (result.isInvalid()) + return ExprError(); + Replacement = result.get(); + refParam = Arg.getNonTypeTemplateArgumentType()->isReferenceType(); + } + return new (SemaRef.Context) SubstNonTypeTemplateParmExpr( + Replacement->getType(), Replacement->getValueKind(), Loc, Replacement, + AssociatedDecl, NTTP->getIndex(), PackIndex, refParam, Final); +} + bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, @@ -7068,22 +7102,8 @@ ExprResult Sema::CheckTemplateArgument(NamedDecl *Param, QualType ParamType, // If the parameter type somehow involves auto, deduce the type now. DeducedType *DeducedT = ParamType->getContainedDeducedType(); - if (getLangOpts().CPlusPlus17 && DeducedT && !DeducedT->isDeduced()) { - // During template argument deduction, we allow 'decltype(auto)' to - // match an arbitrary dependent argument. - // FIXME: The language rules don't say what happens in this case. - // FIXME: We get an opaque dependent type out of decltype(auto) if the - // expression is merely instantiation-dependent; is this enough? - if (DeductionArg->isTypeDependent()) { - auto *AT = dyn_cast(DeducedT); - if (AT && AT->isDecltypeAuto()) { - SugaredConverted = TemplateArgument(Arg, /*IsCanonical=*/false); - CanonicalConverted = TemplateArgument( - Context.getCanonicalTemplateArgument(SugaredConverted)); - return Arg; - } - } - + bool IsDeduced = DeducedT && DeducedT->getDeducedType().isNull(); + if (IsDeduced) { // When checking a deduced template argument, deduce from its type even if // the type is dependent, in order to check the types of non-type template // arguments line up properly in partial ordering. 
@@ -7112,17 +7132,21 @@ ExprResult Sema::CheckTemplateArgument(NamedDecl *Param, QualType ParamType, // along with the other associated constraints after // checking the template argument list. /*IgnoreConstraints=*/true); - if (Result == TemplateDeductionResult::AlreadyDiagnosed) { - return ExprError(); - } else if (Result != TemplateDeductionResult::Success) { - if (const auto *NTTP = dyn_cast(Param)) { - Diag(Arg->getExprLoc(), - diag::err_non_type_template_parm_type_deduction_failure) - << Param->getDeclName() << NTTP->getType() << Arg->getType() - << Arg->getSourceRange(); + if (Result != TemplateDeductionResult::Success) { + ParamType = TSI->getType(); + if (StrictCheck || !DeductionArg->isTypeDependent()) { + if (Result == TemplateDeductionResult::AlreadyDiagnosed) + return ExprError(); + if (const auto *NTTP = dyn_cast(Param)) + Diag(Arg->getExprLoc(), + diag::err_non_type_template_parm_type_deduction_failure) + << Param->getDeclName() << NTTP->getType() << Arg->getType() + << Arg->getSourceRange(); + NoteTemplateParameterLocation(*Param); + return ExprError(); } - NoteTemplateParameterLocation(*Param); - return ExprError(); + ParamType = SubstAutoTypeDependent(ParamType); + assert(!ParamType.isNull() && "substituting DependentTy can't fail"); } } // CheckNonTypeTemplateParameterType will produce a diagnostic if there's @@ -7144,14 +7168,16 @@ ExprResult Sema::CheckTemplateArgument(NamedDecl *Param, QualType ParamType, // type-dependent, there's nothing we can check now. if (ParamType->isDependentType() || DeductionArg->isTypeDependent()) { // Force the argument to the type of the parameter to maintain invariants. - ExprResult E = ImpCastExprToType( - DeductionArg, ParamType.getNonLValueExprType(Context), CK_Dependent, - ParamType->isLValueReferenceType() ? VK_LValue - : ParamType->isRValueReferenceType() ? 
VK_XValue - : VK_PRValue); - if (E.isInvalid()) - return ExprError(); - setDeductionArg(E.get()); + if (!IsDeduced) { + ExprResult E = ImpCastExprToType( + DeductionArg, ParamType.getNonLValueExprType(Context), CK_Dependent, + ParamType->isLValueReferenceType() ? VK_LValue + : ParamType->isRValueReferenceType() ? VK_XValue + : VK_PRValue); + if (E.isInvalid()) + return ExprError(); + setDeductionArg(E.get()); + } SugaredConverted = TemplateArgument(Arg, /*IsCanonical=*/false); CanonicalConverted = TemplateArgument( Context.getCanonicalTemplateArgument(SugaredConverted)); @@ -8555,6 +8581,7 @@ static SourceRange findTemplateParameter(unsigned Depth, TypeLoc TL) { static bool CheckNonTypeTemplatePartialSpecializationArgs( Sema &S, SourceLocation TemplateNameLoc, NonTypeTemplateParmDecl *Param, const TemplateArgument *Args, unsigned NumArgs, bool IsDefaultArgument) { + bool HasError = false; for (unsigned I = 0; I != NumArgs; ++I) { if (Args[I].getKind() == TemplateArgument::Pack) { if (CheckNonTypeTemplatePartialSpecializationArgs( @@ -8569,6 +8596,10 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs( continue; Expr *ArgExpr = Args[I].getAsExpr(); + if (ArgExpr->containsErrors()) { + HasError = true; + continue; + } // We can have a pack expansion of any of the bullets below. 
if (PackExpansionExpr *Expansion = dyn_cast(ArgExpr)) @@ -8638,7 +8669,7 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs( } } - return false; + return HasError; } bool Sema::CheckTemplatePartialSpecializationArgs( diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp index 962fa4da75946..f6ee7452c2f9a 100644 --- a/clang/lib/Sema/SemaTemplateDeduction.cpp +++ b/clang/lib/Sema/SemaTemplateDeduction.cpp @@ -5262,18 +5262,6 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result, SmallVector Deduced; Deduced.resize(1); - // If deduction failed, don't diagnose if the initializer is dependent; it - // might acquire a matching type in the instantiation. - auto DeductionFailed = [&](TemplateDeductionResult TDK) { - if (Init->isTypeDependent()) { - Result = - SubstituteDeducedTypeTransform(*this, DependentResult).Apply(Type); - assert(!Result.isNull() && "substituting DependentTy can't fail"); - return TemplateDeductionResult::Success; - } - return TDK; - }; - SmallVector OriginalCallArgs; QualType DeducedType; @@ -5323,9 +5311,9 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result, Diag(Info.getLocation(), diag::err_auto_inconsistent_deduction) << Info.FirstArg << Info.SecondArg << DeducedFromInitRange << Init->getSourceRange(); - return DeductionFailed(TemplateDeductionResult::AlreadyDiagnosed); + return TemplateDeductionResult::AlreadyDiagnosed; } - return DeductionFailed(TDK); + return TDK; } if (DeducedFromInitRange.isInvalid() && @@ -5347,12 +5335,12 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result, OriginalCallArgs, /*Decomposed=*/false, /*ArgIdx=*/0, /*TDF=*/0, FailedTSC); TDK != TemplateDeductionResult::Success) - return DeductionFailed(TDK); + return TDK; } // Could be null if somehow 'auto' appears in a non-deduced context. 
if (Deduced[0].getKind() != TemplateArgument::Type) - return DeductionFailed(TemplateDeductionResult::Incomplete); + return TemplateDeductionResult::Incomplete; DeducedType = Deduced[0].getAsType(); if (InitList) { @@ -5366,7 +5354,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result, if (!Context.hasSameType(DeducedType, Result)) { Info.FirstArg = Result; Info.SecondArg = DeducedType; - return DeductionFailed(TemplateDeductionResult::Inconsistent); + return TemplateDeductionResult::Inconsistent; } DeducedType = Context.getCommonSugaredType(Result, DeducedType); } @@ -5390,7 +5378,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result, CheckOriginalCallArgDeduction(*this, Info, OriginalArg, DeducedA); TDK != TemplateDeductionResult::Success) { Result = QualType(); - return DeductionFailed(TDK); + return TDK; } } @@ -5412,13 +5400,17 @@ TypeSourceInfo *Sema::SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, } QualType Sema::SubstAutoTypeDependent(QualType TypeWithAuto) { - return SubstituteDeducedTypeTransform(*this, DependentAuto{false}) + return SubstituteDeducedTypeTransform( + *this, + DependentAuto{/*IsPack=*/isa(TypeWithAuto)}) .TransformType(TypeWithAuto); } TypeSourceInfo * Sema::SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto) { - return SubstituteDeducedTypeTransform(*this, DependentAuto{false}) + return SubstituteDeducedTypeTransform( + *this, DependentAuto{/*IsPack=*/isa( + TypeWithAuto->getType())}) .TransformType(TypeWithAuto); } diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp index a72c95d6d77cf..f1c9c5c868159 100644 --- a/clang/lib/Sema/SemaTemplateInstantiate.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp @@ -616,29 +616,30 @@ Sema::InstantiatingTemplate::InstantiatingTemplate( Invalid = true; return; } - Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange); + + CodeSynthesisContext Inst; + Inst.Kind = Kind; + 
Inst.PointOfInstantiation = PointOfInstantiation; + Inst.Entity = Entity; + Inst.Template = Template; + Inst.TemplateArgs = TemplateArgs.data(); + Inst.NumTemplateArgs = TemplateArgs.size(); + Inst.DeductionInfo = DeductionInfo; + Inst.InstantiationRange = InstantiationRange; + Inst.InConstraintSubstitution = + Inst.Kind == CodeSynthesisContext::ConstraintSubstitution; + if (!SemaRef.CodeSynthesisContexts.empty()) + Inst.InConstraintSubstitution |= + SemaRef.CodeSynthesisContexts.back().InConstraintSubstitution; + + Invalid = SemaRef.pushCodeSynthesisContext(Inst); if (!Invalid) { - CodeSynthesisContext Inst; - Inst.Kind = Kind; - Inst.PointOfInstantiation = PointOfInstantiation; - Inst.Entity = Entity; - Inst.Template = Template; - Inst.TemplateArgs = TemplateArgs.data(); - Inst.NumTemplateArgs = TemplateArgs.size(); - Inst.DeductionInfo = DeductionInfo; - Inst.InstantiationRange = InstantiationRange; - Inst.InConstraintSubstitution = - Inst.Kind == CodeSynthesisContext::ConstraintSubstitution; - if (!SemaRef.CodeSynthesisContexts.empty()) - Inst.InConstraintSubstitution |= - SemaRef.CodeSynthesisContexts.back().InConstraintSubstitution; - - SemaRef.pushCodeSynthesisContext(Inst); - - AlreadyInstantiating = !Inst.Entity ? false : - !SemaRef.InstantiatingSpecializations - .insert({Inst.Entity->getCanonicalDecl(), Inst.Kind}) - .second; + AlreadyInstantiating = + !Inst.Entity + ? 
false + : !SemaRef.InstantiatingSpecializations + .insert({Inst.Entity->getCanonicalDecl(), Inst.Kind}) + .second; atTemplateBegin(SemaRef.TemplateInstCallbacks, SemaRef, Inst); } } @@ -834,18 +835,34 @@ Sema::InstantiatingTemplate::InstantiatingTemplate( : InstantiatingTemplate(SemaRef, CodeSynthesisContext::PartialOrderingTTP, ArgLoc, InstantiationRange, PArg) {} -void Sema::pushCodeSynthesisContext(CodeSynthesisContext Ctx) { +bool Sema::pushCodeSynthesisContext(CodeSynthesisContext Ctx) { Ctx.SavedInNonInstantiationSFINAEContext = InNonInstantiationSFINAEContext; InNonInstantiationSFINAEContext = false; - CodeSynthesisContexts.push_back(Ctx); - - if (!Ctx.isInstantiationRecord()) + if (!Ctx.isInstantiationRecord()) { ++NonInstantiationEntries; + } else { + assert(SemaRef.NonInstantiationEntries <= + SemaRef.CodeSynthesisContexts.size()); + if ((SemaRef.CodeSynthesisContexts.size() - + SemaRef.NonInstantiationEntries) > + SemaRef.getLangOpts().InstantiationDepth) { + SemaRef.Diag(Ctx.PointOfInstantiation, + diag::err_template_recursion_depth_exceeded) + << SemaRef.getLangOpts().InstantiationDepth << Ctx.InstantiationRange; + SemaRef.Diag(Ctx.PointOfInstantiation, + diag::note_template_recursion_depth) + << SemaRef.getLangOpts().InstantiationDepth; + return true; + } + } + + CodeSynthesisContexts.push_back(Ctx); // Check to see if we're low on stack space. We can't do anything about this // from here, but we can at least warn the user. 
StackHandler.warnOnStackNearlyExhausted(Ctx.PointOfInstantiation); + return false; } void Sema::popCodeSynthesisContext() { @@ -907,25 +924,6 @@ static std::string convertCallArgsToString(Sema &S, return Result; } -bool Sema::InstantiatingTemplate::CheckInstantiationDepth( - SourceLocation PointOfInstantiation, - SourceRange InstantiationRange) { - assert(SemaRef.NonInstantiationEntries <= - SemaRef.CodeSynthesisContexts.size()); - if ((SemaRef.CodeSynthesisContexts.size() - - SemaRef.NonInstantiationEntries) - <= SemaRef.getLangOpts().InstantiationDepth) - return false; - - SemaRef.Diag(PointOfInstantiation, - diag::err_template_recursion_depth_exceeded) - << SemaRef.getLangOpts().InstantiationDepth - << InstantiationRange; - SemaRef.Diag(PointOfInstantiation, diag::note_template_recursion_depth) - << SemaRef.getLangOpts().InstantiationDepth; - return true; -} - void Sema::PrintInstantiationStack(InstantiationContextDiagFuncRef DiagFunc) { // Determine which template instantiations to skip, if any. 
unsigned SkipStart = CodeSynthesisContexts.size(), SkipEnd = SkipStart; @@ -1373,16 +1371,6 @@ std::optional Sema::isSFINAEContext() const { return std::nullopt; } -static TemplateArgument -getPackSubstitutedTemplateArgument(Sema &S, TemplateArgument Arg) { - assert(S.ArgPackSubstIndex); - assert(*S.ArgPackSubstIndex < Arg.pack_size()); - Arg = Arg.pack_begin()[*S.ArgPackSubstIndex]; - if (Arg.isPackExpansion()) - Arg = Arg.getPackExpansionPattern(); - return Arg; -} - //===----------------------------------------------------------------------===/ // Template Instantiation for Types //===----------------------------------------------------------------------===/ @@ -1449,13 +1437,6 @@ namespace { return TemplateArgs.getNewDepth(Depth); } - UnsignedOrNone getPackIndex(TemplateArgument Pack) { - UnsignedOrNone Index = getSema().ArgPackSubstIndex; - if (!Index) - return std::nullopt; - return Pack.pack_size() - 1 - *Index; - } - bool TryExpandParameterPacks(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef Unexpanded, @@ -1537,7 +1518,7 @@ namespace { if (TA.getKind() != TemplateArgument::Pack) return TA; if (SemaRef.ArgPackSubstIndex) - return getPackSubstitutedTemplateArgument(SemaRef, TA); + return SemaRef.getPackSubstitutedTemplateArgument(TA); assert(TA.pack_size() == 1 && TA.pack_begin()->isPackExpansion() && "unexpected pack arguments in template rewrite"); TemplateArgument Arg = *TA.pack_begin(); @@ -1643,10 +1624,6 @@ namespace { ExprResult TransformTemplateParmRefExpr(DeclRefExpr *E, NonTypeTemplateParmDecl *D); - ExprResult TransformSubstNonTypeTemplateParmPackExpr( - SubstNonTypeTemplateParmPackExpr *E); - ExprResult TransformSubstNonTypeTemplateParmExpr( - SubstNonTypeTemplateParmExpr *E); /// Rebuild a DeclRefExpr for a VarDecl reference. 
ExprResult RebuildVarDeclRefExpr(ValueDecl *PD, SourceLocation Loc); @@ -1933,12 +1910,6 @@ namespace { SmallVectorImpl &PTypes, SmallVectorImpl &TransParams, Sema::ExtParameterInfoBuilder &PInfos); - - private: - ExprResult - transformNonTypeTemplateParmRef(Decl *AssociatedDecl, const NamedDecl *parm, - SourceLocation loc, TemplateArgument arg, - UnsignedOrNone PackIndex, bool Final); }; } @@ -1975,7 +1946,7 @@ Decl *TemplateInstantiator::TransformDecl(SourceLocation Loc, Decl *D) { if (TTP->isParameterPack()) { assert(Arg.getKind() == TemplateArgument::Pack && "Missing argument pack"); - Arg = getPackSubstitutedTemplateArgument(getSema(), Arg); + Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg); } TemplateName Template = Arg.getAsTemplate(); @@ -2079,7 +2050,7 @@ TemplateInstantiator::TransformFirstQualifierInScope(NamedDecl *D, if (!getSema().ArgPackSubstIndex) return nullptr; - Arg = getPackSubstitutedTemplateArgument(getSema(), Arg); + Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg); } QualType T = Arg.getAsType(); @@ -2165,8 +2136,8 @@ TemplateName TemplateInstantiator::TransformTemplateName( Arg, AssociatedDecl, TTP->getIndex(), Final); } - PackIndex = getPackIndex(Arg); - Arg = getPackSubstitutedTemplateArgument(getSema(), Arg); + PackIndex = SemaRef.getPackIndex(Arg); + Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg); } TemplateName Template = Arg.getAsTemplate(); @@ -2183,10 +2154,10 @@ TemplateName TemplateInstantiator::TransformTemplateName( TemplateArgument Pack = SubstPack->getArgumentPack(); TemplateName Template = - getPackSubstitutedTemplateArgument(getSema(), Pack).getAsTemplate(); + SemaRef.getPackSubstitutedTemplateArgument(Pack).getAsTemplate(); return getSema().Context.getSubstTemplateTemplateParm( Template, SubstPack->getAssociatedDecl(), SubstPack->getIndex(), - getPackIndex(Pack), SubstPack->getFinal()); + SemaRef.getPackIndex(Pack), SubstPack->getFinal()); } return inherited::TransformTemplateName( @@ -2252,11 +2223,11 @@ 
TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E, ExprType, TargetType->isReferenceType() ? VK_LValue : VK_PRValue, E->getLocation(), Arg, AssociatedDecl, NTTP->getPosition(), Final); } - PackIndex = getPackIndex(Arg); - Arg = getPackSubstitutedTemplateArgument(getSema(), Arg); + PackIndex = SemaRef.getPackIndex(Arg); + Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg); } - return transformNonTypeTemplateParmRef(AssociatedDecl, NTTP, E->getLocation(), - Arg, PackIndex, Final); + return SemaRef.BuildSubstNonTypeTemplateParmExpr( + AssociatedDecl, NTTP, E->getLocation(), Arg, PackIndex, Final); } const AnnotateAttr * @@ -2344,144 +2315,6 @@ TemplateInstantiator::TransformOpenACCRoutineDeclAttr( "applies to a Function Decl (and a few places for VarDecl)"); } -ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef( - Decl *AssociatedDecl, const NamedDecl *parm, SourceLocation loc, - TemplateArgument arg, UnsignedOrNone PackIndex, bool Final) { - ExprResult result; - - // Determine the substituted parameter type. We can usually infer this from - // the template argument, but not always. - auto SubstParamType = [&] { - if (const auto *NTTP = dyn_cast(parm)) { - QualType T; - if (NTTP->isExpandedParameterPack()) - T = NTTP->getExpansionType(*SemaRef.ArgPackSubstIndex); - else - T = NTTP->getType(); - if (parm->isParameterPack() && isa(T)) - T = cast(T)->getPattern(); - return SemaRef.SubstType(T, TemplateArgs, loc, parm->getDeclName()); - } - return SemaRef.SubstType(arg.getAsExpr()->getType(), TemplateArgs, loc, - parm->getDeclName()); - }; - - bool refParam = false; - - // The template argument itself might be an expression, in which case we just - // return that expression. This happens when substituting into an alias - // template. 
- if (arg.getKind() == TemplateArgument::Expression) { - Expr *argExpr = arg.getAsExpr(); - result = argExpr; - if (argExpr->isLValue()) { - if (argExpr->getType()->isRecordType()) { - // Check whether the parameter was actually a reference. - QualType paramType = SubstParamType(); - if (paramType.isNull()) - return ExprError(); - refParam = paramType->isReferenceType(); - } else { - refParam = true; - } - } - } else if (arg.getKind() == TemplateArgument::Declaration || - arg.getKind() == TemplateArgument::NullPtr) { - if (arg.getKind() == TemplateArgument::Declaration) { - ValueDecl *VD = arg.getAsDecl(); - - // Find the instantiation of the template argument. This is - // required for nested templates. - VD = cast_or_null( - getSema().FindInstantiatedDecl(loc, VD, TemplateArgs)); - if (!VD) - return ExprError(); - } - - QualType paramType = arg.getNonTypeTemplateArgumentType(); - assert(!paramType.isNull() && "type substitution failed for param type"); - assert(!paramType->isDependentType() && "param type still dependent"); - result = SemaRef.BuildExpressionFromDeclTemplateArgument(arg, paramType, loc); - refParam = paramType->isReferenceType(); - } else { - QualType paramType = arg.getNonTypeTemplateArgumentType(); - result = SemaRef.BuildExpressionFromNonTypeTemplateArgument(arg, loc); - refParam = paramType->isReferenceType(); - assert(result.isInvalid() || - SemaRef.Context.hasSameType(result.get()->getType(), - paramType.getNonReferenceType())); - } - - if (result.isInvalid()) - return ExprError(); - - Expr *resultExpr = result.get(); - return new (SemaRef.Context) SubstNonTypeTemplateParmExpr( - resultExpr->getType(), resultExpr->getValueKind(), loc, resultExpr, - AssociatedDecl, - clang::getDepthAndIndex(const_cast(parm)).second, PackIndex, - refParam, Final); -} - -ExprResult -TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr( - SubstNonTypeTemplateParmPackExpr *E) { - if (!getSema().ArgPackSubstIndex) { - // We aren't expanding the 
parameter pack, so just return ourselves. - return E; - } - - TemplateArgument Pack = E->getArgumentPack(); - TemplateArgument Arg = getPackSubstitutedTemplateArgument(getSema(), Pack); - return transformNonTypeTemplateParmRef( - E->getAssociatedDecl(), E->getParameterPack(), - E->getParameterPackLocation(), Arg, getPackIndex(Pack), E->getFinal()); -} - -ExprResult -TemplateInstantiator::TransformSubstNonTypeTemplateParmExpr( - SubstNonTypeTemplateParmExpr *E) { - ExprResult SubstReplacement = E->getReplacement(); - if (!isa(SubstReplacement.get())) - SubstReplacement = TransformExpr(E->getReplacement()); - if (SubstReplacement.isInvalid()) - return true; - QualType SubstType = TransformType(E->getParameterType(getSema().Context)); - if (SubstType.isNull()) - return true; - // The type may have been previously dependent and not now, which means we - // might have to implicit cast the argument to the new type, for example: - // template - // concept C = sizeof(U) == 4; - // void foo() requires C<2, 'a'> { } - // When normalizing foo(), we first form the normalized constraints of C: - // AtomicExpr(sizeof(U) == 4, - // U=SubstNonTypeTemplateParmExpr(Param=U, - // Expr=DeclRef(U), - // Type=decltype(T))) - // Then we substitute T = 2, U = 'a' into the parameter mapping, and need to - // produce: - // AtomicExpr(sizeof(U) == 4, - // U=SubstNonTypeTemplateParmExpr(Param=U, - // Expr=ImpCast( - // decltype(2), - // SubstNTTPE(Param=U, Expr='a', - // Type=char)), - // Type=decltype(2))) - // The call to CheckTemplateArgument here produces the ImpCast. 
- TemplateArgument SugaredConverted, CanonicalConverted; - if (SemaRef - .CheckTemplateArgument(E->getParameter(), SubstType, - SubstReplacement.get(), SugaredConverted, - CanonicalConverted, - /*StrictCheck=*/false, Sema::CTAK_Specified) - .isInvalid()) - return true; - return transformNonTypeTemplateParmRef( - E->getAssociatedDecl(), E->getParameter(), E->getExprLoc(), - SugaredConverted, E->getPackIndex(), E->getFinal()); -} - ExprResult TemplateInstantiator::RebuildVarDeclRefExpr(ValueDecl *PD, SourceLocation Loc) { DeclarationNameInfo NameInfo(PD->getDeclName(), Loc); @@ -2701,8 +2534,8 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB, } // PackIndex starts from last element. - PackIndex = getPackIndex(Arg); - Arg = getPackSubstitutedTemplateArgument(getSema(), Arg); + PackIndex = SemaRef.getPackIndex(Arg); + Arg = SemaRef.getPackSubstitutedTemplateArgument(Arg); } assert(Arg.getKind() == TemplateArgument::Type && @@ -2749,20 +2582,20 @@ QualType TemplateInstantiator::TransformSubstTemplateTypeParmPackType( } TemplateArgument Pack = T->getArgumentPack(); - TemplateArgument Arg = getPackSubstitutedTemplateArgument(getSema(), Pack); + TemplateArgument Arg = SemaRef.getPackSubstitutedTemplateArgument(Pack); return BuildSubstTemplateTypeParmType( TLB, SuppressObjCLifetime, T->getFinal(), NewReplaced, T->getIndex(), - getPackIndex(Pack), Arg, TL.getNameLoc()); + SemaRef.getPackIndex(Pack), Arg, TL.getNameLoc()); } QualType TemplateInstantiator::TransformSubstBuiltinTemplatePackType( TypeLocBuilder &TLB, SubstBuiltinTemplatePackTypeLoc TL) { if (!getSema().ArgPackSubstIndex) return TreeTransform::TransformSubstBuiltinTemplatePackType(TLB, TL); - auto &Sema = getSema(); - TemplateArgument Result = getPackSubstitutedTemplateArgument( - Sema, TL.getTypePtr()->getArgumentPack()); - TLB.pushTrivial(Sema.getASTContext(), Result.getAsType(), TL.getBeginLoc()); + TemplateArgument Result = SemaRef.getPackSubstitutedTemplateArgument( + 
TL.getTypePtr()->getArgumentPack()); + TLB.pushTrivial(SemaRef.getASTContext(), Result.getAsType(), + TL.getBeginLoc()); return Result.getAsType(); } diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp index adac3dff5b2b4..e2dc70360506e 100644 --- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp +++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp @@ -3742,7 +3742,7 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl( ExpandedParams.reserve(D->getNumExpansionTemplateParameters()); for (unsigned I = 0, N = D->getNumExpansionTemplateParameters(); I != N; ++I) { - LocalInstantiationScope Scope(SemaRef); + LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true); TemplateParameterList *Expansion = SubstTemplateParams(D->getExpansionTemplateParameters(I)); if (!Expansion) @@ -3774,7 +3774,7 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl( if (Expand) { for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgPackSubstIndexRAII SubstIndex(SemaRef, I); - LocalInstantiationScope Scope(SemaRef); + LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true); TemplateParameterList *Expansion = SubstTemplateParams(TempParams); if (!Expansion) return nullptr; @@ -3785,21 +3785,18 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl( // expanded parameter pack is the original expansion type, but callers // will end up using the expanded parameter pack types for type-checking. IsExpandedParameterPack = true; - InstParams = TempParams; - } else { - // We cannot fully expand the pack expansion now, so just substitute - // into the pattern. 
- Sema::ArgPackSubstIndexRAII SubstIndex(SemaRef, std::nullopt); - - LocalInstantiationScope Scope(SemaRef); - InstParams = SubstTemplateParams(TempParams); - if (!InstParams) - return nullptr; } + + Sema::ArgPackSubstIndexRAII SubstIndex(SemaRef, std::nullopt); + + LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true); + InstParams = SubstTemplateParams(TempParams); + if (!InstParams) + return nullptr; } else { // Perform the actual substitution of template parameters within a new, // local instantiation scope. - LocalInstantiationScope Scope(SemaRef); + LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true); InstParams = SubstTemplateParams(TempParams); if (!InstParams) return nullptr; diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h index 242ffb09af006..6967301483361 100644 --- a/clang/lib/Sema/TreeTransform.h +++ b/clang/lib/Sema/TreeTransform.h @@ -1783,6 +1783,14 @@ class TreeTransform { LParenLoc, EndLoc); } + OMPClause * + RebuildOMPLoopRangeClause(Expr *First, Expr *Count, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation FirstLoc, + SourceLocation CountLoc, SourceLocation EndLoc) { + return getSema().OpenMP().ActOnOpenMPLoopRangeClause( + First, Count, StartLoc, LParenLoc, FirstLoc, CountLoc, EndLoc); + } + /// Build a new OpenMP 'allocator' clause. /// /// By default, performs semantic analysis to build the new OpenMP clause. 
@@ -9607,6 +9615,17 @@ StmtResult TreeTransform::TransformOMPInterchangeDirective( return Res; } +template +StmtResult +TreeTransform::TransformOMPFuseDirective(OMPFuseDirective *D) { + DeclarationNameInfo DirName; + getDerived().getSema().OpenMP().StartOpenMPDSABlock( + D->getDirectiveKind(), DirName, nullptr, D->getBeginLoc()); + StmtResult Res = getDerived().TransformOMPExecutableDirective(D); + getDerived().getSema().OpenMP().EndOpenMPDSABlock(Res.get()); + return Res; +} + template StmtResult TreeTransform::TransformOMPForDirective(OMPForDirective *D) { @@ -10500,6 +10519,31 @@ TreeTransform::TransformOMPPartialClause(OMPPartialClause *C) { C->getEndLoc()); } +template +OMPClause * +TreeTransform::TransformOMPLoopRangeClause(OMPLoopRangeClause *C) { + ExprResult F = getDerived().TransformExpr(C->getFirst()); + if (F.isInvalid()) + return nullptr; + + ExprResult Cn = getDerived().TransformExpr(C->getCount()); + if (Cn.isInvalid()) + return nullptr; + + Expr *First = F.get(); + Expr *Count = Cn.get(); + + bool Changed = (First != C->getFirst()) || (Count != C->getCount()); + + // If no changes and AlwaysRebuild() is false, return the original clause + if (!Changed && !getDerived().AlwaysRebuild()) + return C; + + return RebuildOMPLoopRangeClause(First, Count, C->getBeginLoc(), + C->getLParenLoc(), C->getFirstLoc(), + C->getCountLoc(), C->getEndLoc()); +} + template OMPClause * TreeTransform::TransformOMPCollapseClause(OMPCollapseClause *C) { @@ -16289,20 +16333,68 @@ TreeTransform::TransformPackIndexingExpr(PackIndexingExpr *E) { IndexExpr.get(), ExpandedExprs, FullySubstituted); } -template -ExprResult -TreeTransform::TransformSubstNonTypeTemplateParmPackExpr( - SubstNonTypeTemplateParmPackExpr *E) { - // Default behavior is to do nothing with this transformation. 
- return E; +template +ExprResult TreeTransform::TransformSubstNonTypeTemplateParmPackExpr( + SubstNonTypeTemplateParmPackExpr *E) { + if (!getSema().ArgPackSubstIndex) + // We aren't expanding the parameter pack, so just return ourselves. + return E; + + TemplateArgument Pack = E->getArgumentPack(); + TemplateArgument Arg = SemaRef.getPackSubstitutedTemplateArgument(Pack); + return SemaRef.BuildSubstNonTypeTemplateParmExpr( + E->getAssociatedDecl(), E->getParameterPack(), + E->getParameterPackLocation(), Arg, SemaRef.getPackIndex(Pack), + E->getFinal()); } -template -ExprResult -TreeTransform::TransformSubstNonTypeTemplateParmExpr( - SubstNonTypeTemplateParmExpr *E) { - // Default behavior is to do nothing with this transformation. - return E; +template +ExprResult TreeTransform::TransformSubstNonTypeTemplateParmExpr( + SubstNonTypeTemplateParmExpr *E) { + Expr *OrigReplacement = E->getReplacement()->IgnoreImplicitAsWritten(); + ExprResult Replacement = getDerived().TransformExpr(OrigReplacement); + if (Replacement.isInvalid()) + return true; + + Decl *AssociatedDecl = + getDerived().TransformDecl(E->getNameLoc(), E->getAssociatedDecl()); + if (!AssociatedDecl) + return true; + + if (Replacement.get() == OrigReplacement && + AssociatedDecl == E->getAssociatedDecl()) + return E; + + // If the replacement expression did not change, and the parameter type + // did not change, we can skip the semantic action because it would + // produce the same result anyway. + auto *Param = cast( + getReplacedTemplateParameterList(AssociatedDecl) + ->asArray()[E->getIndex()]); + if (QualType ParamType = Param->getType(); + !SemaRef.Context.hasSameType(ParamType, E->getParameter()->getType()) || + Replacement.get() != OrigReplacement) { + + // When transforming the replacement expression previously, all Sema + // specific annotations, such as implicit casts, are discarded. Calling the + // corresponding sema action is necessary to recover those. 
Otherwise, + // equivalency of the result would be lost. + TemplateArgument SugaredConverted, CanonicalConverted; + Replacement = SemaRef.CheckTemplateArgument( + Param, ParamType, Replacement.get(), SugaredConverted, + CanonicalConverted, + /*StrictCheck=*/false, Sema::CTAK_Specified); + if (Replacement.isInvalid()) + return true; + } else { + // Otherwise, the same expression would have been produced. + Replacement = E->getReplacement(); + } + + return new (SemaRef.Context) SubstNonTypeTemplateParmExpr( + Replacement.get()->getType(), Replacement.get()->getValueKind(), + E->getNameLoc(), Replacement.get(), AssociatedDecl, E->getIndex(), + E->getPackIndex(), E->isReferenceParameter(), E->getFinal()); } template diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp index 9ee8a0fb0f060..c05e428a6fb39 100644 --- a/clang/lib/Serialization/ASTReader.cpp +++ b/clang/lib/Serialization/ASTReader.cpp @@ -11215,6 +11215,9 @@ OMPClause *OMPClauseReader::readClause() { case llvm::omp::OMPC_partial: C = OMPPartialClause::CreateEmpty(Context); break; + case llvm::omp::OMPC_looprange: + C = OMPLoopRangeClause::CreateEmpty(Context); + break; case llvm::omp::OMPC_allocator: C = new (Context) OMPAllocatorClause(); break; @@ -11618,6 +11621,14 @@ void OMPClauseReader::VisitOMPPartialClause(OMPPartialClause *C) { C->setLParenLoc(Record.readSourceLocation()); } +void OMPClauseReader::VisitOMPLoopRangeClause(OMPLoopRangeClause *C) { + C->setFirst(Record.readSubExpr()); + C->setCount(Record.readSubExpr()); + C->setLParenLoc(Record.readSourceLocation()); + C->setFirstLoc(Record.readSourceLocation()); + C->setCountLoc(Record.readSourceLocation()); +} + void OMPClauseReader::VisitOMPAllocatorClause(OMPAllocatorClause *C) { C->setAllocator(Record.readExpr()); C->setLParenLoc(Record.readSourceLocation()); diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp index 213c2c2148f64..70b898a53fcbd 100644 --- 
a/clang/lib/Serialization/ASTReaderStmt.cpp +++ b/clang/lib/Serialization/ASTReaderStmt.cpp @@ -2469,10 +2469,21 @@ void ASTStmtReader::VisitOMPReverseDirective(OMPReverseDirective *D) { VisitOMPCanonicalLoopNestTransformationDirective(D); } +void ASTStmtReader::VisitOMPCanonicalLoopSequenceTransformationDirective( + OMPCanonicalLoopSequenceTransformationDirective *D) { + VisitStmt(D); + VisitOMPExecutableDirective(D); + D->setNumGeneratedTopLevelLoops(Record.readUInt32()); +} + void ASTStmtReader::VisitOMPInterchangeDirective(OMPInterchangeDirective *D) { VisitOMPCanonicalLoopNestTransformationDirective(D); } +void ASTStmtReader::VisitOMPFuseDirective(OMPFuseDirective *D) { + VisitOMPCanonicalLoopSequenceTransformationDirective(D); +} + void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) { VisitOMPLoopDirective(D); D->setHasCancel(Record.readBool()); @@ -3615,6 +3626,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) { break; } + case STMT_OMP_FUSE_DIRECTIVE: { + unsigned NumClauses = Record[ASTStmtReader::NumStmtFields]; + S = OMPFuseDirective::CreateEmpty(Context, NumClauses); + break; + } + case STMT_OMP_INTERCHANGE_DIRECTIVE: { unsigned NumLoops = Record[ASTStmtReader::NumStmtFields]; unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1]; diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp index 09859da171fcd..cdf95ba1c4ba5 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -7882,6 +7882,14 @@ void OMPClauseWriter::VisitOMPPartialClause(OMPPartialClause *C) { Record.AddSourceLocation(C->getLParenLoc()); } +void OMPClauseWriter::VisitOMPLoopRangeClause(OMPLoopRangeClause *C) { + Record.AddStmt(C->getFirst()); + Record.AddStmt(C->getCount()); + Record.AddSourceLocation(C->getLParenLoc()); + Record.AddSourceLocation(C->getFirstLoc()); + Record.AddSourceLocation(C->getCountLoc()); +} + void OMPClauseWriter::VisitOMPAllocatorClause(OMPAllocatorClause *C) { 
Record.AddStmt(C->getAllocator()); Record.AddSourceLocation(C->getLParenLoc()); diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp index 21c04ddbc2c7a..ebda91e3819c3 100644 --- a/clang/lib/Serialization/ASTWriterStmt.cpp +++ b/clang/lib/Serialization/ASTWriterStmt.cpp @@ -2487,6 +2487,18 @@ void ASTStmtWriter::VisitOMPInterchangeDirective(OMPInterchangeDirective *D) { Code = serialization::STMT_OMP_INTERCHANGE_DIRECTIVE; } +void ASTStmtWriter::VisitOMPCanonicalLoopSequenceTransformationDirective( + OMPCanonicalLoopSequenceTransformationDirective *D) { + VisitStmt(D); + VisitOMPExecutableDirective(D); + Record.writeUInt32(D->getNumGeneratedTopLevelLoops()); +} + +void ASTStmtWriter::VisitOMPFuseDirective(OMPFuseDirective *D) { + VisitOMPCanonicalLoopSequenceTransformationDirective(D); + Code = serialization::STMT_OMP_FUSE_DIRECTIVE; +} + void ASTStmtWriter::VisitOMPForDirective(OMPForDirective *D) { VisitOMPLoopDirective(D); Record.writeBool(D->hasCancel()); diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp index 785cdfa15bf04..4e472b7fc38b0 100644 --- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp +++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp @@ -1814,6 +1814,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, case Stmt::OMPStripeDirectiveClass: case Stmt::OMPTileDirectiveClass: case Stmt::OMPInterchangeDirectiveClass: + case Stmt::OMPFuseDirectiveClass: case Stmt::OMPInteropDirectiveClass: case Stmt::OMPDispatchDirectiveClass: case Stmt::OMPMaskedDirectiveClass: diff --git a/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp b/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp index be74ff2cd4799..5301f88057203 100644 --- a/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp +++ b/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp @@ -65,7 +65,7 @@ void ModelInjector::onBodySynthesis(const NamedDecl *D) { else fileName = 
llvm::StringRef(D->getName().str() + ".model"); - if (!llvm::sys::fs::exists(fileName.str())) { + if (!CI.getVirtualFileSystem().exists(fileName)) { Bodies[D->getName()] = nullptr; return; } diff --git a/clang/lib/Tooling/DependencyScanning/CMakeLists.txt b/clang/lib/Tooling/DependencyScanning/CMakeLists.txt index 42a63faa26d3e..53a2728bd5786 100644 --- a/clang/lib/Tooling/DependencyScanning/CMakeLists.txt +++ b/clang/lib/Tooling/DependencyScanning/CMakeLists.txt @@ -10,6 +10,7 @@ add_clang_library(clangDependencyScanning DependencyScanningService.cpp DependencyScanningWorker.cpp DependencyScanningTool.cpp + DependencyScannerImpl.cpp InProcessModuleCache.cpp ModuleDepCollector.cpp diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp new file mode 100644 index 0000000000000..d370bfd0dd10f --- /dev/null +++ b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.cpp @@ -0,0 +1,525 @@ +//===- DependencyScanner.cpp - Performs module dependency scanning --------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "DependencyScannerImpl.h" +#include "clang/Basic/DiagnosticFrontend.h" +#include "clang/Basic/DiagnosticSerialization.h" +#include "clang/Frontend/FrontendActions.h" +#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h" + +using namespace clang; +using namespace tooling; +using namespace dependencies; + +namespace { +/// Forwards the gatherered dependencies to the consumer. 
+class DependencyConsumerForwarder : public DependencyFileGenerator { +public: + DependencyConsumerForwarder(std::unique_ptr Opts, + StringRef WorkingDirectory, DependencyConsumer &C) + : DependencyFileGenerator(*Opts), WorkingDirectory(WorkingDirectory), + Opts(std::move(Opts)), C(C) {} + + void finishedMainFile(DiagnosticsEngine &Diags) override { + C.handleDependencyOutputOpts(*Opts); + llvm::SmallString<256> CanonPath; + for (const auto &File : getDependencies()) { + CanonPath = File; + llvm::sys::path::remove_dots(CanonPath, /*remove_dot_dot=*/true); + llvm::sys::fs::make_absolute(WorkingDirectory, CanonPath); + C.handleFileDependency(CanonPath); + } + } + +private: + StringRef WorkingDirectory; + std::unique_ptr Opts; + DependencyConsumer &C; +}; + +static bool checkHeaderSearchPaths(const HeaderSearchOptions &HSOpts, + const HeaderSearchOptions &ExistingHSOpts, + DiagnosticsEngine *Diags, + const LangOptions &LangOpts) { + if (LangOpts.Modules) { + if (HSOpts.VFSOverlayFiles != ExistingHSOpts.VFSOverlayFiles) { + if (Diags) { + Diags->Report(diag::warn_pch_vfsoverlay_mismatch); + auto VFSNote = [&](int Type, ArrayRef VFSOverlays) { + if (VFSOverlays.empty()) { + Diags->Report(diag::note_pch_vfsoverlay_empty) << Type; + } else { + std::string Files = llvm::join(VFSOverlays, "\n"); + Diags->Report(diag::note_pch_vfsoverlay_files) << Type << Files; + } + }; + VFSNote(0, HSOpts.VFSOverlayFiles); + VFSNote(1, ExistingHSOpts.VFSOverlayFiles); + } + } + } + return false; +} + +using PrebuiltModuleFilesT = decltype(HeaderSearchOptions::PrebuiltModuleFiles); + +/// A listener that collects the imported modules and the input +/// files. While visiting, collect vfsoverlays and file inputs that determine +/// whether prebuilt modules fully resolve in stable directories. 
+class PrebuiltModuleListener : public ASTReaderListener { +public: + PrebuiltModuleListener(PrebuiltModuleFilesT &PrebuiltModuleFiles, + llvm::SmallVector &NewModuleFiles, + PrebuiltModulesAttrsMap &PrebuiltModulesASTMap, + const HeaderSearchOptions &HSOpts, + const LangOptions &LangOpts, DiagnosticsEngine &Diags, + const ArrayRef StableDirs) + : PrebuiltModuleFiles(PrebuiltModuleFiles), + NewModuleFiles(NewModuleFiles), + PrebuiltModulesASTMap(PrebuiltModulesASTMap), ExistingHSOpts(HSOpts), + ExistingLangOpts(LangOpts), Diags(Diags), StableDirs(StableDirs) {} + + bool needsImportVisitation() const override { return true; } + bool needsInputFileVisitation() override { return true; } + bool needsSystemInputFileVisitation() override { return true; } + + /// Accumulate the modules are transitively depended on by the initial + /// prebuilt module. + void visitImport(StringRef ModuleName, StringRef Filename) override { + if (PrebuiltModuleFiles.insert({ModuleName.str(), Filename.str()}).second) + NewModuleFiles.push_back(Filename.str()); + + auto PrebuiltMapEntry = PrebuiltModulesASTMap.try_emplace(Filename); + PrebuiltModuleASTAttrs &PrebuiltModule = PrebuiltMapEntry.first->second; + if (PrebuiltMapEntry.second) + PrebuiltModule.setInStableDir(!StableDirs.empty()); + + if (auto It = PrebuiltModulesASTMap.find(CurrentFile); + It != PrebuiltModulesASTMap.end() && CurrentFile != Filename) + PrebuiltModule.addDependent(It->getKey()); + } + + /// For each input file discovered, check whether it's external path is in a + /// stable directory. Traversal is stopped if the current module is not + /// considered stable. 
+ bool visitInputFileAsRequested(StringRef FilenameAsRequested, + StringRef Filename, bool isSystem, + bool isOverridden, + bool isExplicitModule) override { + if (StableDirs.empty()) + return false; + auto PrebuiltEntryIt = PrebuiltModulesASTMap.find(CurrentFile); + if ((PrebuiltEntryIt == PrebuiltModulesASTMap.end()) || + (!PrebuiltEntryIt->second.isInStableDir())) + return false; + + PrebuiltEntryIt->second.setInStableDir( + isPathInStableDir(StableDirs, Filename)); + return PrebuiltEntryIt->second.isInStableDir(); + } + + /// Update which module that is being actively traversed. + void visitModuleFile(StringRef Filename, + serialization::ModuleKind Kind) override { + // If the CurrentFile is not + // considered stable, update any of it's transitive dependents. + auto PrebuiltEntryIt = PrebuiltModulesASTMap.find(CurrentFile); + if ((PrebuiltEntryIt != PrebuiltModulesASTMap.end()) && + !PrebuiltEntryIt->second.isInStableDir()) + PrebuiltEntryIt->second.updateDependentsNotInStableDirs( + PrebuiltModulesASTMap); + CurrentFile = Filename; + } + + /// Check the header search options for a given module when considering + /// if the module comes from stable directories. + bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts, + StringRef ModuleFilename, + StringRef SpecificModuleCachePath, + bool Complain) override { + + auto PrebuiltMapEntry = PrebuiltModulesASTMap.try_emplace(CurrentFile); + PrebuiltModuleASTAttrs &PrebuiltModule = PrebuiltMapEntry.first->second; + if (PrebuiltMapEntry.second) + PrebuiltModule.setInStableDir(!StableDirs.empty()); + + if (PrebuiltModule.isInStableDir()) + PrebuiltModule.setInStableDir(areOptionsInStableDir(StableDirs, HSOpts)); + + return false; + } + + /// Accumulate vfsoverlays used to build these prebuilt modules. 
+ bool ReadHeaderSearchPaths(const HeaderSearchOptions &HSOpts, + bool Complain) override { + + auto PrebuiltMapEntry = PrebuiltModulesASTMap.try_emplace(CurrentFile); + PrebuiltModuleASTAttrs &PrebuiltModule = PrebuiltMapEntry.first->second; + if (PrebuiltMapEntry.second) + PrebuiltModule.setInStableDir(!StableDirs.empty()); + + PrebuiltModule.setVFS( + llvm::StringSet<>(llvm::from_range, HSOpts.VFSOverlayFiles)); + + return checkHeaderSearchPaths( + HSOpts, ExistingHSOpts, Complain ? &Diags : nullptr, ExistingLangOpts); + } + +private: + PrebuiltModuleFilesT &PrebuiltModuleFiles; + llvm::SmallVector &NewModuleFiles; + PrebuiltModulesAttrsMap &PrebuiltModulesASTMap; + const HeaderSearchOptions &ExistingHSOpts; + const LangOptions &ExistingLangOpts; + DiagnosticsEngine &Diags; + std::string CurrentFile; + const ArrayRef StableDirs; +}; + +/// Visit the given prebuilt module and collect all of the modules it +/// transitively imports and contributing input files. +static bool visitPrebuiltModule(StringRef PrebuiltModuleFilename, + CompilerInstance &CI, + PrebuiltModuleFilesT &ModuleFiles, + PrebuiltModulesAttrsMap &PrebuiltModulesASTMap, + DiagnosticsEngine &Diags, + const ArrayRef StableDirs) { + // List of module files to be processed. 
+ llvm::SmallVector Worklist; + + PrebuiltModuleListener Listener(ModuleFiles, Worklist, PrebuiltModulesASTMap, + CI.getHeaderSearchOpts(), CI.getLangOpts(), + Diags, StableDirs); + + Listener.visitModuleFile(PrebuiltModuleFilename, + serialization::MK_ExplicitModule); + if (ASTReader::readASTFileControlBlock( + PrebuiltModuleFilename, CI.getFileManager(), CI.getModuleCache(), + CI.getPCHContainerReader(), + /*FindModuleFileExtensions=*/false, Listener, + /*ValidateDiagnosticOptions=*/false, ASTReader::ARR_OutOfDate)) + return true; + + while (!Worklist.empty()) { + Listener.visitModuleFile(Worklist.back(), serialization::MK_ExplicitModule); + if (ASTReader::readASTFileControlBlock( + Worklist.pop_back_val(), CI.getFileManager(), CI.getModuleCache(), + CI.getPCHContainerReader(), + /*FindModuleFileExtensions=*/false, Listener, + /*ValidateDiagnosticOptions=*/false)) + return true; + } + return false; +} + +/// Transform arbitrary file name into an object-like file name. +static std::string makeObjFileName(StringRef FileName) { + SmallString<128> ObjFileName(FileName); + llvm::sys::path::replace_extension(ObjFileName, "o"); + return std::string(ObjFileName); +} + +/// Deduce the dependency target based on the output file and input files. +static std::string +deduceDepTarget(const std::string &OutputFile, + const SmallVectorImpl &InputFiles) { + if (OutputFile != "-") + return OutputFile; + + if (InputFiles.empty() || !InputFiles.front().isFile()) + return "clang-scan-deps\\ dependency"; + + return makeObjFileName(InputFiles.front().getFile()); +} + +// Clang implements -D and -U by splatting text into a predefines buffer. This +// allows constructs such as `-DFඞ=3 "-D F\u{0D9E} 4 3 2”` to be accepted and +// define the same macro, or adding C++ style comments before the macro name. +// +// This function checks that the first non-space characters in the macro +// obviously form an identifier that can be uniqued on without lexing. 
Failing +// to do this could lead to changing the final definition of a macro. +// +// We could set up a preprocessor and actually lex the name, but that's very +// heavyweight for a situation that will almost never happen in practice. +static std::optional getSimpleMacroName(StringRef Macro) { + StringRef Name = Macro.split("=").first.ltrim(" \t"); + std::size_t I = 0; + + auto FinishName = [&]() -> std::optional { + StringRef SimpleName = Name.slice(0, I); + if (SimpleName.empty()) + return std::nullopt; + return SimpleName; + }; + + for (; I != Name.size(); ++I) { + switch (Name[I]) { + case '(': // Start of macro parameter list + case ' ': // End of macro name + case '\t': + return FinishName(); + case '_': + continue; + default: + if (llvm::isAlnum(Name[I])) + continue; + return std::nullopt; + } + } + return FinishName(); +} + +static void canonicalizeDefines(PreprocessorOptions &PPOpts) { + using MacroOpt = std::pair; + std::vector SimpleNames; + SimpleNames.reserve(PPOpts.Macros.size()); + std::size_t Index = 0; + for (const auto &M : PPOpts.Macros) { + auto SName = getSimpleMacroName(M.first); + // Skip optimizing if we can't guarantee we can preserve relative order. + if (!SName) + return; + SimpleNames.emplace_back(*SName, Index); + ++Index; + } + + llvm::stable_sort(SimpleNames, llvm::less_first()); + // Keep the last instance of each macro name by going in reverse + auto NewEnd = std::unique( + SimpleNames.rbegin(), SimpleNames.rend(), + [](const MacroOpt &A, const MacroOpt &B) { return A.first == B.first; }); + SimpleNames.erase(SimpleNames.begin(), NewEnd.base()); + + // Apply permutation. 
+ decltype(PPOpts.Macros) NewMacros; + NewMacros.reserve(SimpleNames.size()); + for (std::size_t I = 0, E = SimpleNames.size(); I != E; ++I) { + std::size_t OriginalIndex = SimpleNames[I].second; + // We still emit undefines here as they may be undefining a predefined macro + NewMacros.push_back(std::move(PPOpts.Macros[OriginalIndex])); + } + std::swap(PPOpts.Macros, NewMacros); +} + +class ScanningDependencyDirectivesGetter : public DependencyDirectivesGetter { + DependencyScanningWorkerFilesystem *DepFS; + +public: + ScanningDependencyDirectivesGetter(FileManager &FileMgr) : DepFS(nullptr) { + FileMgr.getVirtualFileSystem().visit([&](llvm::vfs::FileSystem &FS) { + auto *DFS = llvm::dyn_cast(&FS); + if (DFS) { + assert(!DepFS && "Found multiple scanning VFSs"); + DepFS = DFS; + } + }); + assert(DepFS && "Did not find scanning VFS"); + } + + std::unique_ptr + cloneFor(FileManager &FileMgr) override { + return std::make_unique(FileMgr); + } + + std::optional> + operator()(FileEntryRef File) override { + return DepFS->getDirectiveTokens(File.getName()); + } +}; +} // namespace + +/// Sanitize diagnostic options for dependency scan. +void clang::tooling::dependencies::sanitizeDiagOpts( + DiagnosticOptions &DiagOpts) { + // Don't print 'X warnings and Y errors generated'. + DiagOpts.ShowCarets = false; + // Don't write out diagnostic file. + DiagOpts.DiagnosticSerializationFile.clear(); + // Don't emit warnings except for scanning specific warnings. + // TODO: It would be useful to add a more principled way to ignore all + // warnings that come from source code. The issue is that we need to + // ignore warnings that could be surpressed by + // `#pragma clang diagnostic`, while still allowing some scanning + // warnings for things we're not ready to turn into errors yet. + // See `test/ClangScanDeps/diagnostic-pragmas.c` for an example. 
+ llvm::erase_if(DiagOpts.Warnings, [](StringRef Warning) { + return llvm::StringSwitch(Warning) + .Cases("pch-vfs-diff", "error=pch-vfs-diff", false) + .StartsWith("no-error=", false) + .Default(true); + }); +} + +bool DependencyScanningAction::runInvocation( + std::shared_ptr Invocation, + IntrusiveRefCntPtr FS, + std::shared_ptr PCHContainerOps, + DiagnosticConsumer *DiagConsumer) { + // Making sure that we canonicalize the defines before we create the deep + // copy to avoid unnecessary variants in the scanner and in the resulting + // explicit command lines. + if (any(Service.getOptimizeArgs() & ScanningOptimizations::Macros)) + canonicalizeDefines(Invocation->getPreprocessorOpts()); + + // Make a deep copy of the original Clang invocation. + CompilerInvocation OriginalInvocation(*Invocation); + + if (Scanned) { + // Scanning runs once for the first -cc1 invocation in a chain of driver + // jobs. For any dependent jobs, reuse the scanning result and just + // update the LastCC1Arguments to correspond to the new invocation. + // FIXME: to support multi-arch builds, each arch requires a separate scan + setLastCC1Arguments(std::move(OriginalInvocation)); + return true; + } + + Scanned = true; + + // Create a compiler instance to handle the actual work. + auto ModCache = makeInProcessModuleCache(Service.getModuleCacheEntries()); + ScanInstanceStorage.emplace(std::move(Invocation), std::move(PCHContainerOps), + ModCache.get()); + CompilerInstance &ScanInstance = *ScanInstanceStorage; + ScanInstance.setBuildingModule(false); + + ScanInstance.createVirtualFileSystem(FS, DiagConsumer); + + // Create the compiler's actual diagnostics engine. 
+ sanitizeDiagOpts(ScanInstance.getDiagnosticOpts()); + assert(!DiagConsumerFinished && "attempt to reuse finished consumer"); + ScanInstance.createDiagnostics(DiagConsumer, /*ShouldOwnClient=*/false); + if (!ScanInstance.hasDiagnostics()) + return false; + + ScanInstance.getPreprocessorOpts().AllowPCHWithDifferentModulesCachePath = + true; + + if (ScanInstance.getHeaderSearchOpts().ModulesValidateOncePerBuildSession) + ScanInstance.getHeaderSearchOpts().BuildSessionTimestamp = + Service.getBuildSessionTimestamp(); + + ScanInstance.getFrontendOpts().DisableFree = false; + ScanInstance.getFrontendOpts().GenerateGlobalModuleIndex = false; + ScanInstance.getFrontendOpts().UseGlobalModuleIndex = false; + // This will prevent us compiling individual modules asynchronously since + // FileManager is not thread-safe, but it does improve performance for now. + ScanInstance.getFrontendOpts().ModulesShareFileManager = true; + ScanInstance.getHeaderSearchOpts().ModuleFormat = "raw"; + ScanInstance.getHeaderSearchOpts().ModulesIncludeVFSUsage = + any(Service.getOptimizeArgs() & ScanningOptimizations::VFS); + + // Create a new FileManager to match the invocation's FileSystemOptions. + auto *FileMgr = ScanInstance.createFileManager(); + + // Use the dependency scanning optimized file system if requested to do so. + if (DepFS) { + DepFS->resetBypassedPathPrefix(); + if (!ScanInstance.getHeaderSearchOpts().ModuleCachePath.empty()) { + SmallString<256> ModulesCachePath; + normalizeModuleCachePath( + *FileMgr, ScanInstance.getHeaderSearchOpts().ModuleCachePath, + ModulesCachePath); + DepFS->setBypassedPathPrefix(ModulesCachePath); + } + + ScanInstance.setDependencyDirectivesGetter( + std::make_unique(*FileMgr)); + } + + ScanInstance.createSourceManager(*FileMgr); + + // Create a collection of stable directories derived from the ScanInstance + // for determining whether module dependencies would fully resolve from + // those directories. 
+ llvm::SmallVector StableDirs; + const StringRef Sysroot = ScanInstance.getHeaderSearchOpts().Sysroot; + if (!Sysroot.empty() && (llvm::sys::path::root_directory(Sysroot) != Sysroot)) + StableDirs = {Sysroot, ScanInstance.getHeaderSearchOpts().ResourceDir}; + + // Store a mapping of prebuilt module files and their properties like header + // search options. This will prevent the implicit build to create duplicate + // modules and will force reuse of the existing prebuilt module files + // instead. + PrebuiltModulesAttrsMap PrebuiltModulesASTMap; + + if (!ScanInstance.getPreprocessorOpts().ImplicitPCHInclude.empty()) + if (visitPrebuiltModule( + ScanInstance.getPreprocessorOpts().ImplicitPCHInclude, ScanInstance, + ScanInstance.getHeaderSearchOpts().PrebuiltModuleFiles, + PrebuiltModulesASTMap, ScanInstance.getDiagnostics(), StableDirs)) + return false; + + // Create the dependency collector that will collect the produced + // dependencies. + // + // This also moves the existing dependency output options from the + // invocation to the collector. The options in the invocation are reset, + // which ensures that the compiler won't create new dependency collectors, + // and thus won't write out the extra '.d' files to disk. + auto Opts = std::make_unique(); + std::swap(*Opts, ScanInstance.getInvocation().getDependencyOutputOpts()); + // We need at least one -MT equivalent for the generator of make dependency + // files to work. 
+ if (Opts->Targets.empty()) + Opts->Targets = {deduceDepTarget(ScanInstance.getFrontendOpts().OutputFile, + ScanInstance.getFrontendOpts().Inputs)}; + Opts->IncludeSystemHeaders = true; + + switch (Service.getFormat()) { + case ScanningOutputFormat::Make: + ScanInstance.addDependencyCollector( + std::make_shared( + std::move(Opts), WorkingDirectory, Consumer)); + break; + case ScanningOutputFormat::P1689: + case ScanningOutputFormat::Full: + MDC = std::make_shared( + Service, std::move(Opts), ScanInstance, Consumer, Controller, + OriginalInvocation, std::move(PrebuiltModulesASTMap), StableDirs); + ScanInstance.addDependencyCollector(MDC); + break; + } + + // Consider different header search and diagnostic options to create + // different modules. This avoids the unsound aliasing of module PCMs. + // + // TODO: Implement diagnostic bucketing to reduce the impact of strict + // context hashing. + ScanInstance.getHeaderSearchOpts().ModulesStrictContextHash = true; + ScanInstance.getHeaderSearchOpts().ModulesSerializeOnlyPreprocessor = true; + ScanInstance.getHeaderSearchOpts().ModulesSkipDiagnosticOptions = true; + ScanInstance.getHeaderSearchOpts().ModulesSkipHeaderSearchPaths = true; + ScanInstance.getHeaderSearchOpts().ModulesSkipPragmaDiagnosticMappings = true; + ScanInstance.getHeaderSearchOpts().ModulesForceValidateUserHeaders = false; + + // Avoid some checks and module map parsing when loading PCM files. + ScanInstance.getPreprocessorOpts().ModulesCheckRelocated = false; + + std::unique_ptr Action; + + if (Service.getFormat() == ScanningOutputFormat::P1689) + Action = std::make_unique(); + else if (ModuleName) + Action = std::make_unique(*ModuleName); + else + Action = std::make_unique(); + + if (ScanInstance.getDiagnostics().hasErrorOccurred()) + return false; + + const bool Result = ScanInstance.ExecuteAction(*Action); + + // ExecuteAction is responsible for calling finish. 
+ DiagConsumerFinished = true; + + if (Result) + setLastCC1Arguments(std::move(OriginalInvocation)); + + return Result; +} diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.h b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.h new file mode 100644 index 0000000000000..32fbcfffde53c --- /dev/null +++ b/clang/lib/Tooling/DependencyScanning/DependencyScannerImpl.h @@ -0,0 +1,82 @@ +//===- DependencyScanner.h - Performs module dependency scanning *- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNER_H +#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNER_H + +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Frontend/CompilerInvocation.h" +#include "clang/Serialization/ObjectFilePCHContainerReader.h" +#include "clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h" +#include "clang/Tooling/DependencyScanning/ModuleDepCollector.h" + +namespace clang { +class DiagnosticConsumer; + +namespace tooling { +namespace dependencies { +class DependencyScanningService; +class DependencyConsumer; +class DependencyActionController; +class DependencyScanningWorkerFilesystem; + +class DependencyScanningAction { +public: + DependencyScanningAction( + DependencyScanningService &Service, StringRef WorkingDirectory, + DependencyConsumer &Consumer, DependencyActionController &Controller, + llvm::IntrusiveRefCntPtr DepFS, + std::optional ModuleName = std::nullopt) + : Service(Service), WorkingDirectory(WorkingDirectory), + Consumer(Consumer), Controller(Controller), DepFS(std::move(DepFS)), + ModuleName(ModuleName) {} + bool runInvocation(std::shared_ptr Invocation, + IntrusiveRefCntPtr FS, + std::shared_ptr 
PCHContainerOps, + DiagnosticConsumer *DiagConsumer); + + bool hasScanned() const { return Scanned; } + bool hasDiagConsumerFinished() const { return DiagConsumerFinished; } + + /// Take the cc1 arguments corresponding to the most recent invocation used + /// with this action. Any modifications implied by the discovered dependencies + /// will have already been applied. + std::vector takeLastCC1Arguments() { + std::vector Result; + std::swap(Result, LastCC1Arguments); // Reset LastCC1Arguments to empty. + return Result; + } + +private: + void setLastCC1Arguments(CompilerInvocation &&CI) { + if (MDC) + MDC->applyDiscoveredDependencies(CI); + LastCC1Arguments = CI.getCC1CommandLine(); + } + + DependencyScanningService &Service; + StringRef WorkingDirectory; + DependencyConsumer &Consumer; + DependencyActionController &Controller; + llvm::IntrusiveRefCntPtr DepFS; + std::optional ModuleName; + std::optional ScanInstanceStorage; + std::shared_ptr MDC; + std::vector LastCC1Arguments; + bool Scanned = false; + bool DiagConsumerFinished = false; +}; + +// Helper functions +void sanitizeDiagOpts(DiagnosticOptions &DiagOpts); + +} // namespace dependencies +} // namespace tooling +} // namespace clang + +#endif diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp index 8375732e4aa33..796e587ba9147 100644 --- a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp +++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h" +#include "DependencyScannerImpl.h" #include "clang/Basic/DiagnosticDriver.h" #include "clang/Basic/DiagnosticFrontend.h" #include "clang/Basic/DiagnosticSerialization.h" @@ -35,563 +36,6 @@ using namespace clang; using namespace tooling; using namespace dependencies; 
-namespace { - -/// Forwards the gatherered dependencies to the consumer. -class DependencyConsumerForwarder : public DependencyFileGenerator { -public: - DependencyConsumerForwarder(std::unique_ptr Opts, - StringRef WorkingDirectory, DependencyConsumer &C) - : DependencyFileGenerator(*Opts), WorkingDirectory(WorkingDirectory), - Opts(std::move(Opts)), C(C) {} - - void finishedMainFile(DiagnosticsEngine &Diags) override { - C.handleDependencyOutputOpts(*Opts); - llvm::SmallString<256> CanonPath; - for (const auto &File : getDependencies()) { - CanonPath = File; - llvm::sys::path::remove_dots(CanonPath, /*remove_dot_dot=*/true); - llvm::sys::fs::make_absolute(WorkingDirectory, CanonPath); - C.handleFileDependency(CanonPath); - } - } - -private: - StringRef WorkingDirectory; - std::unique_ptr Opts; - DependencyConsumer &C; -}; - -static bool checkHeaderSearchPaths(const HeaderSearchOptions &HSOpts, - const HeaderSearchOptions &ExistingHSOpts, - DiagnosticsEngine *Diags, - const LangOptions &LangOpts) { - if (LangOpts.Modules) { - if (HSOpts.VFSOverlayFiles != ExistingHSOpts.VFSOverlayFiles) { - if (Diags) { - Diags->Report(diag::warn_pch_vfsoverlay_mismatch); - auto VFSNote = [&](int Type, ArrayRef VFSOverlays) { - if (VFSOverlays.empty()) { - Diags->Report(diag::note_pch_vfsoverlay_empty) << Type; - } else { - std::string Files = llvm::join(VFSOverlays, "\n"); - Diags->Report(diag::note_pch_vfsoverlay_files) << Type << Files; - } - }; - VFSNote(0, HSOpts.VFSOverlayFiles); - VFSNote(1, ExistingHSOpts.VFSOverlayFiles); - } - } - } - return false; -} - -using PrebuiltModuleFilesT = decltype(HeaderSearchOptions::PrebuiltModuleFiles); - -/// A listener that collects the imported modules and the input -/// files. While visiting, collect vfsoverlays and file inputs that determine -/// whether prebuilt modules fully resolve in stable directories. 
-class PrebuiltModuleListener : public ASTReaderListener { -public: - PrebuiltModuleListener(PrebuiltModuleFilesT &PrebuiltModuleFiles, - llvm::SmallVector &NewModuleFiles, - PrebuiltModulesAttrsMap &PrebuiltModulesASTMap, - const HeaderSearchOptions &HSOpts, - const LangOptions &LangOpts, DiagnosticsEngine &Diags, - const ArrayRef StableDirs) - : PrebuiltModuleFiles(PrebuiltModuleFiles), - NewModuleFiles(NewModuleFiles), - PrebuiltModulesASTMap(PrebuiltModulesASTMap), ExistingHSOpts(HSOpts), - ExistingLangOpts(LangOpts), Diags(Diags), StableDirs(StableDirs) {} - - bool needsImportVisitation() const override { return true; } - bool needsInputFileVisitation() override { return true; } - bool needsSystemInputFileVisitation() override { return true; } - - /// Accumulate the modules are transitively depended on by the initial - /// prebuilt module. - void visitImport(StringRef ModuleName, StringRef Filename) override { - if (PrebuiltModuleFiles.insert({ModuleName.str(), Filename.str()}).second) - NewModuleFiles.push_back(Filename.str()); - - auto PrebuiltMapEntry = PrebuiltModulesASTMap.try_emplace(Filename); - PrebuiltModuleASTAttrs &PrebuiltModule = PrebuiltMapEntry.first->second; - if (PrebuiltMapEntry.second) - PrebuiltModule.setInStableDir(!StableDirs.empty()); - - if (auto It = PrebuiltModulesASTMap.find(CurrentFile); - It != PrebuiltModulesASTMap.end() && CurrentFile != Filename) - PrebuiltModule.addDependent(It->getKey()); - } - - /// For each input file discovered, check whether it's external path is in a - /// stable directory. Traversal is stopped if the current module is not - /// considered stable. 
- bool visitInputFileAsRequested(StringRef FilenameAsRequested, - StringRef Filename, bool isSystem, - bool isOverridden, - bool isExplicitModule) override { - if (StableDirs.empty()) - return false; - auto PrebuiltEntryIt = PrebuiltModulesASTMap.find(CurrentFile); - if ((PrebuiltEntryIt == PrebuiltModulesASTMap.end()) || - (!PrebuiltEntryIt->second.isInStableDir())) - return false; - - PrebuiltEntryIt->second.setInStableDir( - isPathInStableDir(StableDirs, Filename)); - return PrebuiltEntryIt->second.isInStableDir(); - } - - /// Update which module that is being actively traversed. - void visitModuleFile(StringRef Filename, - serialization::ModuleKind Kind) override { - // If the CurrentFile is not - // considered stable, update any of it's transitive dependents. - auto PrebuiltEntryIt = PrebuiltModulesASTMap.find(CurrentFile); - if ((PrebuiltEntryIt != PrebuiltModulesASTMap.end()) && - !PrebuiltEntryIt->second.isInStableDir()) - PrebuiltEntryIt->second.updateDependentsNotInStableDirs( - PrebuiltModulesASTMap); - CurrentFile = Filename; - } - - /// Check the header search options for a given module when considering - /// if the module comes from stable directories. - bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts, - StringRef ModuleFilename, - StringRef SpecificModuleCachePath, - bool Complain) override { - - auto PrebuiltMapEntry = PrebuiltModulesASTMap.try_emplace(CurrentFile); - PrebuiltModuleASTAttrs &PrebuiltModule = PrebuiltMapEntry.first->second; - if (PrebuiltMapEntry.second) - PrebuiltModule.setInStableDir(!StableDirs.empty()); - - if (PrebuiltModule.isInStableDir()) - PrebuiltModule.setInStableDir(areOptionsInStableDir(StableDirs, HSOpts)); - - return false; - } - - /// Accumulate vfsoverlays used to build these prebuilt modules. 
- bool ReadHeaderSearchPaths(const HeaderSearchOptions &HSOpts, - bool Complain) override { - - auto PrebuiltMapEntry = PrebuiltModulesASTMap.try_emplace(CurrentFile); - PrebuiltModuleASTAttrs &PrebuiltModule = PrebuiltMapEntry.first->second; - if (PrebuiltMapEntry.second) - PrebuiltModule.setInStableDir(!StableDirs.empty()); - - PrebuiltModule.setVFS( - llvm::StringSet<>(llvm::from_range, HSOpts.VFSOverlayFiles)); - - return checkHeaderSearchPaths( - HSOpts, ExistingHSOpts, Complain ? &Diags : nullptr, ExistingLangOpts); - } - -private: - PrebuiltModuleFilesT &PrebuiltModuleFiles; - llvm::SmallVector &NewModuleFiles; - PrebuiltModulesAttrsMap &PrebuiltModulesASTMap; - const HeaderSearchOptions &ExistingHSOpts; - const LangOptions &ExistingLangOpts; - DiagnosticsEngine &Diags; - std::string CurrentFile; - const ArrayRef StableDirs; -}; - -/// Visit the given prebuilt module and collect all of the modules it -/// transitively imports and contributing input files. -static bool visitPrebuiltModule(StringRef PrebuiltModuleFilename, - CompilerInstance &CI, - PrebuiltModuleFilesT &ModuleFiles, - PrebuiltModulesAttrsMap &PrebuiltModulesASTMap, - DiagnosticsEngine &Diags, - const ArrayRef StableDirs) { - // List of module files to be processed. 
- llvm::SmallVector Worklist; - - PrebuiltModuleListener Listener(ModuleFiles, Worklist, PrebuiltModulesASTMap, - CI.getHeaderSearchOpts(), CI.getLangOpts(), - Diags, StableDirs); - - Listener.visitModuleFile(PrebuiltModuleFilename, - serialization::MK_ExplicitModule); - if (ASTReader::readASTFileControlBlock( - PrebuiltModuleFilename, CI.getFileManager(), CI.getModuleCache(), - CI.getPCHContainerReader(), - /*FindModuleFileExtensions=*/false, Listener, - /*ValidateDiagnosticOptions=*/false, ASTReader::ARR_OutOfDate)) - return true; - - while (!Worklist.empty()) { - Listener.visitModuleFile(Worklist.back(), serialization::MK_ExplicitModule); - if (ASTReader::readASTFileControlBlock( - Worklist.pop_back_val(), CI.getFileManager(), CI.getModuleCache(), - CI.getPCHContainerReader(), - /*FindModuleFileExtensions=*/false, Listener, - /*ValidateDiagnosticOptions=*/false)) - return true; - } - return false; -} - -/// Transform arbitrary file name into an object-like file name. -static std::string makeObjFileName(StringRef FileName) { - SmallString<128> ObjFileName(FileName); - llvm::sys::path::replace_extension(ObjFileName, "o"); - return std::string(ObjFileName); -} - -/// Deduce the dependency target based on the output file and input files. -static std::string -deduceDepTarget(const std::string &OutputFile, - const SmallVectorImpl &InputFiles) { - if (OutputFile != "-") - return OutputFile; - - if (InputFiles.empty() || !InputFiles.front().isFile()) - return "clang-scan-deps\\ dependency"; - - return makeObjFileName(InputFiles.front().getFile()); -} - -/// Sanitize diagnostic options for dependency scan. -static void sanitizeDiagOpts(DiagnosticOptions &DiagOpts) { - // Don't print 'X warnings and Y errors generated'. - DiagOpts.ShowCarets = false; - // Don't write out diagnostic file. - DiagOpts.DiagnosticSerializationFile.clear(); - // Don't emit warnings except for scanning specific warnings. 
- // TODO: It would be useful to add a more principled way to ignore all - // warnings that come from source code. The issue is that we need to - // ignore warnings that could be surpressed by - // `#pragma clang diagnostic`, while still allowing some scanning - // warnings for things we're not ready to turn into errors yet. - // See `test/ClangScanDeps/diagnostic-pragmas.c` for an example. - llvm::erase_if(DiagOpts.Warnings, [](StringRef Warning) { - return llvm::StringSwitch(Warning) - .Cases("pch-vfs-diff", "error=pch-vfs-diff", false) - .StartsWith("no-error=", false) - .Default(true); - }); -} - -// Clang implements -D and -U by splatting text into a predefines buffer. This -// allows constructs such as `-DFඞ=3 "-D F\u{0D9E} 4 3 2”` to be accepted and -// define the same macro, or adding C++ style comments before the macro name. -// -// This function checks that the first non-space characters in the macro -// obviously form an identifier that can be uniqued on without lexing. Failing -// to do this could lead to changing the final definition of a macro. -// -// We could set up a preprocessor and actually lex the name, but that's very -// heavyweight for a situation that will almost never happen in practice. 
-static std::optional getSimpleMacroName(StringRef Macro) { - StringRef Name = Macro.split("=").first.ltrim(" \t"); - std::size_t I = 0; - - auto FinishName = [&]() -> std::optional { - StringRef SimpleName = Name.slice(0, I); - if (SimpleName.empty()) - return std::nullopt; - return SimpleName; - }; - - for (; I != Name.size(); ++I) { - switch (Name[I]) { - case '(': // Start of macro parameter list - case ' ': // End of macro name - case '\t': - return FinishName(); - case '_': - continue; - default: - if (llvm::isAlnum(Name[I])) - continue; - return std::nullopt; - } - } - return FinishName(); -} - -static void canonicalizeDefines(PreprocessorOptions &PPOpts) { - using MacroOpt = std::pair; - std::vector SimpleNames; - SimpleNames.reserve(PPOpts.Macros.size()); - std::size_t Index = 0; - for (const auto &M : PPOpts.Macros) { - auto SName = getSimpleMacroName(M.first); - // Skip optimizing if we can't guarantee we can preserve relative order. - if (!SName) - return; - SimpleNames.emplace_back(*SName, Index); - ++Index; - } - - llvm::stable_sort(SimpleNames, llvm::less_first()); - // Keep the last instance of each macro name by going in reverse - auto NewEnd = std::unique( - SimpleNames.rbegin(), SimpleNames.rend(), - [](const MacroOpt &A, const MacroOpt &B) { return A.first == B.first; }); - SimpleNames.erase(SimpleNames.begin(), NewEnd.base()); - - // Apply permutation. 
- decltype(PPOpts.Macros) NewMacros; - NewMacros.reserve(SimpleNames.size()); - for (std::size_t I = 0, E = SimpleNames.size(); I != E; ++I) { - std::size_t OriginalIndex = SimpleNames[I].second; - // We still emit undefines here as they may be undefining a predefined macro - NewMacros.push_back(std::move(PPOpts.Macros[OriginalIndex])); - } - std::swap(PPOpts.Macros, NewMacros); -} - -class ScanningDependencyDirectivesGetter : public DependencyDirectivesGetter { - DependencyScanningWorkerFilesystem *DepFS; - -public: - ScanningDependencyDirectivesGetter(FileManager &FileMgr) : DepFS(nullptr) { - FileMgr.getVirtualFileSystem().visit([&](llvm::vfs::FileSystem &FS) { - auto *DFS = llvm::dyn_cast(&FS); - if (DFS) { - assert(!DepFS && "Found multiple scanning VFSs"); - DepFS = DFS; - } - }); - assert(DepFS && "Did not find scanning VFS"); - } - - std::unique_ptr - cloneFor(FileManager &FileMgr) override { - return std::make_unique(FileMgr); - } - - std::optional> - operator()(FileEntryRef File) override { - return DepFS->getDirectiveTokens(File.getName()); - } -}; - -/// A clang tool that runs the preprocessor in a mode that's optimized for -/// dependency scanning for the given compiler invocation. -class DependencyScanningAction { -public: - DependencyScanningAction( - DependencyScanningService &Service, StringRef WorkingDirectory, - DependencyConsumer &Consumer, DependencyActionController &Controller, - llvm::IntrusiveRefCntPtr DepFS, - std::optional ModuleName = std::nullopt) - : Service(Service), WorkingDirectory(WorkingDirectory), - Consumer(Consumer), Controller(Controller), DepFS(std::move(DepFS)), - ModuleName(ModuleName) {} - - bool runInvocation(std::shared_ptr Invocation, - IntrusiveRefCntPtr FS, - std::shared_ptr PCHContainerOps, - DiagnosticConsumer *DiagConsumer) { - // Making sure that we canonicalize the defines before we create the deep - // copy to avoid unnecessary variants in the scanner and in the resulting - // explicit command lines. 
- if (any(Service.getOptimizeArgs() & ScanningOptimizations::Macros)) - canonicalizeDefines(Invocation->getPreprocessorOpts()); - - // Make a deep copy of the original Clang invocation. - CompilerInvocation OriginalInvocation(*Invocation); - - if (Scanned) { - // Scanning runs once for the first -cc1 invocation in a chain of driver - // jobs. For any dependent jobs, reuse the scanning result and just - // update the LastCC1Arguments to correspond to the new invocation. - // FIXME: to support multi-arch builds, each arch requires a separate scan - setLastCC1Arguments(std::move(OriginalInvocation)); - return true; - } - - Scanned = true; - - // Create a compiler instance to handle the actual work. - auto ModCache = makeInProcessModuleCache(Service.getModuleCacheEntries()); - ScanInstanceStorage.emplace(std::move(Invocation), - std::move(PCHContainerOps), ModCache.get()); - CompilerInstance &ScanInstance = *ScanInstanceStorage; - ScanInstance.setBuildingModule(false); - - ScanInstance.createVirtualFileSystem(FS, DiagConsumer); - - // Create the compiler's actual diagnostics engine. - sanitizeDiagOpts(ScanInstance.getDiagnosticOpts()); - assert(!DiagConsumerFinished && "attempt to reuse finished consumer"); - ScanInstance.createDiagnostics(DiagConsumer, /*ShouldOwnClient=*/false); - if (!ScanInstance.hasDiagnostics()) - return false; - - ScanInstance.getPreprocessorOpts().AllowPCHWithDifferentModulesCachePath = - true; - - if (ScanInstance.getHeaderSearchOpts().ModulesValidateOncePerBuildSession) - ScanInstance.getHeaderSearchOpts().BuildSessionTimestamp = - Service.getBuildSessionTimestamp(); - - ScanInstance.getFrontendOpts().DisableFree = false; - ScanInstance.getFrontendOpts().GenerateGlobalModuleIndex = false; - ScanInstance.getFrontendOpts().UseGlobalModuleIndex = false; - // This will prevent us compiling individual modules asynchronously since - // FileManager is not thread-safe, but it does improve performance for now. 
- ScanInstance.getFrontendOpts().ModulesShareFileManager = true; - ScanInstance.getHeaderSearchOpts().ModuleFormat = "raw"; - ScanInstance.getHeaderSearchOpts().ModulesIncludeVFSUsage = - any(Service.getOptimizeArgs() & ScanningOptimizations::VFS); - - // Create a new FileManager to match the invocation's FileSystemOptions. - auto *FileMgr = ScanInstance.createFileManager(); - - // Use the dependency scanning optimized file system if requested to do so. - if (DepFS) { - DepFS->resetBypassedPathPrefix(); - if (!ScanInstance.getHeaderSearchOpts().ModuleCachePath.empty()) { - SmallString<256> ModulesCachePath; - normalizeModuleCachePath( - *FileMgr, ScanInstance.getHeaderSearchOpts().ModuleCachePath, - ModulesCachePath); - DepFS->setBypassedPathPrefix(ModulesCachePath); - } - - ScanInstance.setDependencyDirectivesGetter( - std::make_unique(*FileMgr)); - } - - ScanInstance.createSourceManager(*FileMgr); - - // Create a collection of stable directories derived from the ScanInstance - // for determining whether module dependencies would fully resolve from - // those directories. - llvm::SmallVector StableDirs; - const StringRef Sysroot = ScanInstance.getHeaderSearchOpts().Sysroot; - if (!Sysroot.empty() && - (llvm::sys::path::root_directory(Sysroot) != Sysroot)) - StableDirs = {Sysroot, ScanInstance.getHeaderSearchOpts().ResourceDir}; - - // Store a mapping of prebuilt module files and their properties like header - // search options. This will prevent the implicit build to create duplicate - // modules and will force reuse of the existing prebuilt module files - // instead. 
- PrebuiltModulesAttrsMap PrebuiltModulesASTMap; - - if (!ScanInstance.getPreprocessorOpts().ImplicitPCHInclude.empty()) - if (visitPrebuiltModule( - ScanInstance.getPreprocessorOpts().ImplicitPCHInclude, - ScanInstance, - ScanInstance.getHeaderSearchOpts().PrebuiltModuleFiles, - PrebuiltModulesASTMap, ScanInstance.getDiagnostics(), StableDirs)) - return false; - - // Create the dependency collector that will collect the produced - // dependencies. - // - // This also moves the existing dependency output options from the - // invocation to the collector. The options in the invocation are reset, - // which ensures that the compiler won't create new dependency collectors, - // and thus won't write out the extra '.d' files to disk. - auto Opts = std::make_unique(); - std::swap(*Opts, ScanInstance.getInvocation().getDependencyOutputOpts()); - // We need at least one -MT equivalent for the generator of make dependency - // files to work. - if (Opts->Targets.empty()) - Opts->Targets = { - deduceDepTarget(ScanInstance.getFrontendOpts().OutputFile, - ScanInstance.getFrontendOpts().Inputs)}; - Opts->IncludeSystemHeaders = true; - - switch (Service.getFormat()) { - case ScanningOutputFormat::Make: - ScanInstance.addDependencyCollector( - std::make_shared( - std::move(Opts), WorkingDirectory, Consumer)); - break; - case ScanningOutputFormat::P1689: - case ScanningOutputFormat::Full: - MDC = std::make_shared( - Service, std::move(Opts), ScanInstance, Consumer, Controller, - OriginalInvocation, std::move(PrebuiltModulesASTMap), StableDirs); - ScanInstance.addDependencyCollector(MDC); - break; - } - - // Consider different header search and diagnostic options to create - // different modules. This avoids the unsound aliasing of module PCMs. - // - // TODO: Implement diagnostic bucketing to reduce the impact of strict - // context hashing. 
- ScanInstance.getHeaderSearchOpts().ModulesStrictContextHash = true; - ScanInstance.getHeaderSearchOpts().ModulesSerializeOnlyPreprocessor = true; - ScanInstance.getHeaderSearchOpts().ModulesSkipDiagnosticOptions = true; - ScanInstance.getHeaderSearchOpts().ModulesSkipHeaderSearchPaths = true; - ScanInstance.getHeaderSearchOpts().ModulesSkipPragmaDiagnosticMappings = - true; - ScanInstance.getHeaderSearchOpts().ModulesForceValidateUserHeaders = false; - - // Avoid some checks and module map parsing when loading PCM files. - ScanInstance.getPreprocessorOpts().ModulesCheckRelocated = false; - - std::unique_ptr Action; - - if (Service.getFormat() == ScanningOutputFormat::P1689) - Action = std::make_unique(); - else if (ModuleName) - Action = std::make_unique(*ModuleName); - else - Action = std::make_unique(); - - if (ScanInstance.getDiagnostics().hasErrorOccurred()) - return false; - - const bool Result = ScanInstance.ExecuteAction(*Action); - - // ExecuteAction is responsible for calling finish. - DiagConsumerFinished = true; - - if (Result) - setLastCC1Arguments(std::move(OriginalInvocation)); - - return Result; - } - - bool hasScanned() const { return Scanned; } - bool hasDiagConsumerFinished() const { return DiagConsumerFinished; } - - /// Take the cc1 arguments corresponding to the most recent invocation used - /// with this action. Any modifications implied by the discovered dependencies - /// will have already been applied. - std::vector takeLastCC1Arguments() { - std::vector Result; - std::swap(Result, LastCC1Arguments); // Reset LastCC1Arguments to empty. 
- return Result; - } - -private: - void setLastCC1Arguments(CompilerInvocation &&CI) { - if (MDC) - MDC->applyDiscoveredDependencies(CI); - LastCC1Arguments = CI.getCC1CommandLine(); - } - - DependencyScanningService &Service; - StringRef WorkingDirectory; - DependencyConsumer &Consumer; - DependencyActionController &Controller; - llvm::IntrusiveRefCntPtr DepFS; - std::optional ModuleName; - std::optional ScanInstanceStorage; - std::shared_ptr MDC; - std::vector LastCC1Arguments; - bool Scanned = false; - bool DiagConsumerFinished = false; -}; - -} // end anonymous namespace - DependencyScanningWorker::DependencyScanningWorker( DependencyScanningService &Service, llvm::IntrusiveRefCntPtr FS) diff --git a/clang/test/AST/ByteCode/cxx23.cpp b/clang/test/AST/ByteCode/cxx23.cpp index 72c751d627a44..ce0a4777ffa9b 100644 --- a/clang/test/AST/ByteCode/cxx23.cpp +++ b/clang/test/AST/ByteCode/cxx23.cpp @@ -1,8 +1,8 @@ // UNSUPPORTED: target={{.*}}-zos{{.*}} -// RUN: %clang_cc1 -std=c++20 -fsyntax-only -fcxx-exceptions -verify=ref,ref20,all,all20 %s -// RUN: %clang_cc1 -std=c++23 -fsyntax-only -fcxx-exceptions -verify=ref,ref23,all,all23 %s -// RUN: %clang_cc1 -std=c++20 -fsyntax-only -fcxx-exceptions -verify=expected20,all,all20 %s -fexperimental-new-constant-interpreter -// RUN: %clang_cc1 -std=c++23 -fsyntax-only -fcxx-exceptions -verify=expected23,all,all23 %s -fexperimental-new-constant-interpreter +// RUN: %clang_cc1 -std=c++20 -fsyntax-only -fcxx-exceptions -Wno-deprecated-volatile -verify=ref,ref20,all,all20 %s +// RUN: %clang_cc1 -std=c++23 -fsyntax-only -fcxx-exceptions -Wno-deprecated-volatile -verify=ref,ref23,all,all23 %s +// RUN: %clang_cc1 -std=c++20 -fsyntax-only -fcxx-exceptions -Wno-deprecated-volatile -verify=expected20,all,all20 %s -fexperimental-new-constant-interpreter +// RUN: %clang_cc1 -std=c++23 -fsyntax-only -fcxx-exceptions -Wno-deprecated-volatile -verify=expected23,all,all23 %s -fexperimental-new-constant-interpreter #define assert_active(F) if 
(!__builtin_is_within_lifetime(&F)) (1/0); @@ -393,6 +393,59 @@ namespace UnionMemberCallDiags { static_assert(g()); // all-error {{not an integral constant expression}} \ // all-note {{in call to}} } +#endif + +namespace VolatileWrites { + constexpr void test1() {// all20-error {{never produces a constant expression}} + int k; + volatile int &m = k; + m = 10; // all20-note {{assignment to volatile-qualified type 'volatile int'}} + } + constexpr void test2() { // all20-error {{never produces a constant expression}} + volatile int k = 12; + k = 13; // all20-note {{assignment to volatile-qualified type 'volatile int'}} + } + + constexpr void test3() { // all20-error {{never produces a constant expression}} + volatile int k = 12; // all20-note {{volatile object declared here}} + + *((int *)&k) = 13; // all20-note {{assignment to volatile object 'k' is not allowed in a constant expression}} + } + + constexpr void test4() { // all20-error {{never produces a constant expression}} + int k = 12; + + *((volatile int *)&k) = 13; // all20-note {{assignment to volatile-qualified type 'volatile int' is not allowed in a constant expression}} + } + +#if __cplusplus >= 202302L + struct S { + volatile int k; + }; + constexpr int test5() { + S s; + s.k = 12; // all-note {{assignment to volatile-qualified type 'volatile int' is not}} + + return 0; + } + static_assert(test5() == 0); // all-error{{not an integral constant expression}} \ + // all-note {{in call to}} #endif + + constexpr bool test6(volatile int k) { // ref20-error {{never produces a constant expression}} + k = 14; // ref20-note {{assignment to volatile-qualified type 'volatile int' is not}} \ + // all-note {{assignment to volatile-qualified type 'volatile int' is not}} + return true; + } + static_assert(test6(5)); // all-error {{not an integral constant expression}} \ + // all-note {{in call to}} + + constexpr bool test7(volatile int k) { // all-note {{declared here}} + *((int *)&k) = 13; // all-note {{assignment to 
volatile object 'k' is not allowed in a constant expression}} + return true; + } + static_assert(test7(12)); // all-error {{not an integral constant expression}} \ + // all-note {{in call to}} +} diff --git a/clang/test/AST/ByteCode/invalid.cpp b/clang/test/AST/ByteCode/invalid.cpp index affb40eada870..00db27419e36b 100644 --- a/clang/test/AST/ByteCode/invalid.cpp +++ b/clang/test/AST/ByteCode/invalid.cpp @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -fcxx-exceptions -std=c++20 -fexperimental-new-constant-interpreter -verify=expected,both %s -// RUN: %clang_cc1 -fcxx-exceptions -std=c++20 -verify=ref,both %s +// RUN: %clang_cc1 -fcxx-exceptions -std=c++20 -verify=ref,both %s namespace Throw { diff --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp index f036b7e6a971d..e90163172d2df 100644 --- a/clang/test/CIR/CodeGen/complex.cpp +++ b/clang/test/CIR/CodeGen/complex.cpp @@ -1233,3 +1233,40 @@ void imag_on_const_scalar() { // OGCG: %[[A_ADDR:.*]] = alloca float, align 4 // OGCG: %[[B_ADDR:.*]] = alloca float, align 4 // OGCG: store float 0.000000e+00, ptr %[[B_ADDR]], align 4 + +void real_on_scalar_from_real_with_type_promotion() { + _Float16 _Complex a; + _Float16 b = __real__(__real__ a); +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.complex +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex -> !cir.f16 +// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex -> !cir.f16 +// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float +// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float +// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex +// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex -> !cir.float +// CIR: 
%[[A_REAL_F16:.*]] = cir.cast(floating, %[[A_REAL_F32]] : !cir.float), !cir.f16 +// CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr + +// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2 +// LLVM: %[[B_ADDR]] = alloca half, i64 1, align 2 +// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2 +// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0 +// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1 +// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float +// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float +// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0 +// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1 +// LLVM: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half +// LLVM: store half %[[A_REAL_F16]], ptr %[[B_ADDR]], align 2 + +// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2 +// OGCG: %[[B_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_REAL:.*]] = load half, ptr %[[A_REAL_PTR]], align 2 +// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float +// OGCG: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half +// OGCG: store half %[[A_REAL_F16]], ptr %[[B_ADDR]], align 2 diff --git a/clang/test/CIR/CodeGen/vector-ext.cpp b/clang/test/CIR/CodeGen/vector-ext.cpp index 287d016ff6d1e..8b5379a2b1a88 100644 --- a/clang/test/CIR/CodeGen/vector-ext.cpp +++ b/clang/test/CIR/CodeGen/vector-ext.cpp @@ -1295,4 +1295,30 @@ void foo23() { // OGCG: %[[NE_B_ZERO:.*]] = icmp ne <4 x i32> %[[TMP_B]], zeroinitializer // OGCG: %[[VEC_OR:.*]] = and <4 x i1> %[[NE_A_ZERO]], %[[NE_B_ZERO]] // OGCG: %[[RESULT:.*]] = sext <4 x i1> %[[VEC_OR]] to <4 x i32> -// OGCG: store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16 \ No newline at end of file +// OGCG: 
store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16 + +void logical_not() { + vi4 a; + vi4 b = !a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}}) %[[A_ADDR]] : !cir.ptr>, !cir.vector<4 x !s32i> +// CIR: %[[CONST_V0:.*]] = cir.const #cir.zero : !cir.vector<4 x !s32i> +// CIR: %[[RESULT:.*]] = cir.vec.cmp(eq, %[[TMP_A]], %[[CONST_V0]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i> +// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[B_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// LLVM: %[[RESULT:.*]] = icmp eq <4 x i32> %[[TMP_A]], zeroinitializer +// LLVM: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// LLVM: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 + +// OGCG: %[[A_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[B_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// OGCG: %[[RESULT:.*]] = icmp eq <4 x i32> %[[TMP_A]], zeroinitializer +// OGCG: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// OGCG: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 diff --git a/clang/test/CIR/CodeGen/vector.cpp b/clang/test/CIR/CodeGen/vector.cpp index d66c7a3d2aba6..d8fdeea179288 100644 --- a/clang/test/CIR/CodeGen/vector.cpp +++ b/clang/test/CIR/CodeGen/vector.cpp @@ -1337,4 +1337,56 @@ void foo26() { // OGCG: %[[NE_B_ZERO:.*]] = icmp ne <4 x i32> %[[TMP_B]], zeroinitializer // OGCG: %[[VEC_OR:.*]] = and <4 x i1> %[[NE_A_ZERO]], %[[NE_B_ZERO]] // OGCG: %[[RESULT:.*]] = sext <4 x i1> %[[VEC_OR]] to <4 x i32> -// OGCG: store <4 x i32> %[[RESULT]], ptr %[[C_ADDR]], align 16 \ No newline at end of file +// OGCG: store <4 x 
i32> %[[RESULT]], ptr %[[C_ADDR]], align 16 + +void logical_not() { + vi4 a; + vi4 b = !a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}}) %[[A_ADDR]] : !cir.ptr>, !cir.vector<4 x !s32i> +// CIR: %[[CONST_V0:.*]] = cir.const #cir.zero : !cir.vector<4 x !s32i> +// CIR: %[[RESULT:.*]] = cir.vec.cmp(eq, %[[TMP_A]], %[[CONST_V0]]) : !cir.vector<4 x !s32i>, !cir.vector<4 x !s32i> +// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.vector<4 x !s32i>, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[B_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// LLVM: %[[RESULT:.*]] = icmp eq <4 x i32> %[[TMP_A]], zeroinitializer +// LLVM: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// LLVM: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 + +// OGCG: %[[A_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[B_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[TMP_A:.*]] = load <4 x i32>, ptr %[[A_ADDR]], align 16 +// OGCG: %[[RESULT:.*]] = icmp eq <4 x i32> %[[TMP_A]], zeroinitializer +// OGCG: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// OGCG: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 + +void logical_not_float() { + vf4 a; + vi4 b = !a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.vector<4 x !cir.float>, !cir.ptr>, ["a"] +// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.vector<4 x !s32i>, !cir.ptr>, ["b", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr>, !cir.vector<4 x !cir.float> +// CIR: %[[CONST_V0:.*]] = cir.const #cir.zero : !cir.vector<4 x !cir.float> +// CIR: %[[RESULT:.*]] = cir.vec.cmp(eq, %[[TMP_A]], %[[CONST_V0]]) : !cir.vector<4 x !cir.float>, !cir.vector<4 x !s32i> +// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : 
!cir.vector<4 x !s32i>, !cir.ptr> + +// LLVM: %[[A_ADDR:.*]] = alloca <4 x float>, i64 1, align 16 +// LLVM: %[[B_ADDR:.*]] = alloca <4 x i32>, i64 1, align 16 +// LLVM: %[[TMP_A:.*]] = load <4 x float>, ptr %[[A_ADDR]], align 16 +// LLVM: %[[RESULT:.*]] = fcmp oeq <4 x float> %[[TMP_A]], zeroinitializer +// LLVM: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// LLVM: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 + +// OGCG: %[[A_ADDR:.*]] = alloca <4 x float>, align 16 +// OGCG: %[[B_ADDR:.*]] = alloca <4 x i32>, align 16 +// OGCG: %[[TMP_A:.*]] = load <4 x float>, ptr %[[A_ADDR]], align 16 +// OGCG: %[[RESULT:.*]] = fcmp oeq <4 x float> %[[TMP_A]], zeroinitializer +// OGCG: %[[RESULT_VI4:.*]] = sext <4 x i1> %[[RESULT]] to <4 x i32> +// OGCG: store <4 x i32> %[[RESULT_VI4]], ptr %[[B_ADDR]], align 16 diff --git a/clang/test/CodeGen/atomic-test-and-set.c b/clang/test/CodeGen/atomic-test-and-set.c index 39d4cef16b21d..6438094567f33 100644 --- a/clang/test/CodeGen/atomic-test-and-set.c +++ b/clang/test/CodeGen/atomic-test-and-set.c @@ -81,7 +81,8 @@ void clear_dynamic(char *ptr, int order) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -99,7 +100,8 @@ void test_and_set_relaxed(char *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr 
[[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -117,7 +119,8 @@ void test_and_set_consume(char *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -135,7 +138,8 @@ void test_and_set_acquire(char *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -153,7 +157,8 @@ void test_and_set_release(char *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // 
CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -171,7 +176,8 @@ void test_and_set_acq_rel(char *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -200,27 +206,32 @@ void test_and_set_seq_cst(char *ptr) { // CHECK: [[MONOTONIC]]: // CHECK-NEXT: [[TMP2:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP2]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: br label %[[ATOMIC_CONTINUE:.*]] // CHECK: [[ACQUIRE]]: // CHECK-NEXT: [[TMP3:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1 // CHECK-NEXT: [[TOBOOL1:%.*]] = icmp ne i8 [[TMP3]], 0 -// CHECK-NEXT: store i1 [[TOBOOL1]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT1:%.*]] = zext i1 [[TOBOOL1]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT1]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: br label %[[ATOMIC_CONTINUE]] // CHECK: [[RELEASE]]: // CHECK-NEXT: [[TMP4:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1 // CHECK-NEXT: [[TOBOOL2:%.*]] = icmp ne i8 [[TMP4]], 0 -// CHECK-NEXT: store i1 [[TOBOOL2]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT2:%.*]] = zext i1 [[TOBOOL2]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT2]], ptr [[ATOMIC_TEMP]], align 
1 // CHECK-NEXT: br label %[[ATOMIC_CONTINUE]] // CHECK: [[ACQREL]]: // CHECK-NEXT: [[TMP5:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1 // CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i8 [[TMP5]], 0 -// CHECK-NEXT: store i1 [[TOBOOL3]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT3:%.*]] = zext i1 [[TOBOOL3]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT3]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: br label %[[ATOMIC_CONTINUE]] // CHECK: [[SEQCST]]: // CHECK-NEXT: [[TMP6:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1 // CHECK-NEXT: [[TOBOOL4:%.*]] = icmp ne i8 [[TMP6]], 0 -// CHECK-NEXT: store i1 [[TOBOOL4]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT4:%.*]] = zext i1 [[TOBOOL4]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT4]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: br label %[[ATOMIC_CONTINUE]] // CHECK: [[ATOMIC_CONTINUE]]: // CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 @@ -239,7 +250,8 @@ void test_and_set_dynamic(char *ptr, int order) { // CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x i32], ptr [[X]], i64 0, i64 0 // CHECK-NEXT: [[TMP0:%.*]] = atomicrmw volatile xchg ptr [[ARRAYDECAY]], i8 1 seq_cst, align 4 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP0]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP1]] to i1 // CHECK-NEXT: ret void @@ -301,7 +313,8 @@ void clear_incomplete(struct incomplete *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 4 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: 
[[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -318,7 +331,8 @@ void test_and_set_int(int *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void @@ -335,7 +349,8 @@ void test_and_set_void(void *ptr) { // CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1 // CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0 -// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1 +// CHECK-NEXT: [[TOBOOL_ZEXT:%.*]] = zext i1 [[TOBOOL]] to i8 +// CHECK-NEXT: store i8 [[TOBOOL_ZEXT]], ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 // CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK-NEXT: ret void diff --git a/clang/test/CodeGen/complex.c b/clang/test/CodeGen/complex.c index 6233529a18f8b..91fc9dda72f72 100644 --- a/clang/test/CodeGen/complex.c +++ b/clang/test/CodeGen/complex.c @@ -1,5 +1,81 @@ -// RUN: %clang_cc1 -emit-llvm-only %s +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 6 +// RUN: %clang_cc1 %s -emit-llvm -triple x86_64-unknown-unknown -o - | FileCheck %s +// CHECK-LABEL: define dso_local i32 @main( +// CHECK-SAME: ) #[[ATTR0:[0-9]+]] { +// 
CHECK-NEXT: [[ENTRY:.*]]: +// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[A:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: [[B:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1 +// CHECK-NEXT: store double 5.000000e+00, ptr [[A_REALP]], align 8 +// CHECK-NEXT: store double 0.000000e+00, ptr [[A_IMAGP]], align 8 +// CHECK-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1 +// CHECK-NEXT: store double 4.200000e+01, ptr [[B_REALP]], align 8 +// CHECK-NEXT: store double 0.000000e+00, ptr [[B_IMAGP]], align 8 +// CHECK-NEXT: [[A_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP1]], align 8 +// CHECK-NEXT: [[A_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1 +// CHECK-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP2]], align 8 +// CHECK-NEXT: [[B_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP3]], align 8 +// CHECK-NEXT: [[B_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1 +// CHECK-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP4]], align 8 +// CHECK-NEXT: [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]] +// CHECK-NEXT: [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]] +// CHECK-NEXT: [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]] +// CHECK-NEXT: [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]] +// CHECK-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], 
[[MUL_BD]] +// CHECK-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]] +// CHECK-NEXT: [[ISNAN_CMP:%.*]] = fcmp uno double [[MUL_R]], [[MUL_R]] +// CHECK-NEXT: br i1 [[ISNAN_CMP]], label %[[COMPLEX_MUL_IMAG_NAN:.*]], label %[[COMPLEX_MUL_CONT:.*]], !prof [[PROF2:![0-9]+]] +// CHECK: [[COMPLEX_MUL_IMAG_NAN]]: +// CHECK-NEXT: [[ISNAN_CMP5:%.*]] = fcmp uno double [[MUL_I]], [[MUL_I]] +// CHECK-NEXT: br i1 [[ISNAN_CMP5]], label %[[COMPLEX_MUL_LIBCALL:.*]], label %[[COMPLEX_MUL_CONT]], !prof [[PROF2]] +// CHECK: [[COMPLEX_MUL_LIBCALL]]: +// CHECK-NEXT: [[CALL:%.*]] = call { double, double } @__muldc3(double noundef [[A_REAL]], double noundef [[A_IMAG]], double noundef [[B_REAL]], double noundef [[B_IMAG]]) #[[ATTR4:[0-9]+]] +// CHECK-NEXT: [[TMP0:%.*]] = extractvalue { double, double } [[CALL]], 0 +// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { double, double } [[CALL]], 1 +// CHECK-NEXT: br label %[[COMPLEX_MUL_CONT]] +// CHECK: [[COMPLEX_MUL_CONT]]: +// CHECK-NEXT: [[REAL_MUL_PHI:%.*]] = phi double [ [[MUL_R]], %[[ENTRY]] ], [ [[MUL_R]], %[[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP0]], %[[COMPLEX_MUL_LIBCALL]] ] +// CHECK-NEXT: [[IMAG_MUL_PHI:%.*]] = phi double [ [[MUL_I]], %[[ENTRY]] ], [ [[MUL_I]], %[[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP1]], %[[COMPLEX_MUL_LIBCALL]] ] +// CHECK-NEXT: [[B_REALP6:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0 +// CHECK-NEXT: [[B_REAL7:%.*]] = load double, ptr [[B_REALP6]], align 8 +// CHECK-NEXT: [[B_IMAGP8:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1 +// CHECK-NEXT: [[B_IMAG9:%.*]] = load double, ptr [[B_IMAGP8]], align 8 +// CHECK-NEXT: [[A_REALP10:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[A_REAL11:%.*]] = load double, ptr [[A_REALP10]], align 8 +// CHECK-NEXT: [[A_IMAGP12:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1 +// CHECK-NEXT: [[A_IMAG13:%.*]] = load double, ptr [[A_IMAGP12]], 
align 8 +// CHECK-NEXT: [[MUL_AC14:%.*]] = fmul double [[B_REAL7]], [[A_REAL11]] +// CHECK-NEXT: [[MUL_BD15:%.*]] = fmul double [[B_IMAG9]], [[A_IMAG13]] +// CHECK-NEXT: [[MUL_AD16:%.*]] = fmul double [[B_REAL7]], [[A_IMAG13]] +// CHECK-NEXT: [[MUL_BC17:%.*]] = fmul double [[B_IMAG9]], [[A_REAL11]] +// CHECK-NEXT: [[MUL_R18:%.*]] = fsub double [[MUL_AC14]], [[MUL_BD15]] +// CHECK-NEXT: [[MUL_I19:%.*]] = fadd double [[MUL_AD16]], [[MUL_BC17]] +// CHECK-NEXT: [[ISNAN_CMP20:%.*]] = fcmp uno double [[MUL_R18]], [[MUL_R18]] +// CHECK-NEXT: br i1 [[ISNAN_CMP20]], label %[[COMPLEX_MUL_IMAG_NAN21:.*]], label %[[COMPLEX_MUL_CONT25:.*]], !prof [[PROF2]] +// CHECK: [[COMPLEX_MUL_IMAG_NAN21]]: +// CHECK-NEXT: [[ISNAN_CMP22:%.*]] = fcmp uno double [[MUL_I19]], [[MUL_I19]] +// CHECK-NEXT: br i1 [[ISNAN_CMP22]], label %[[COMPLEX_MUL_LIBCALL23:.*]], label %[[COMPLEX_MUL_CONT25]], !prof [[PROF2]] +// CHECK: [[COMPLEX_MUL_LIBCALL23]]: +// CHECK-NEXT: [[CALL24:%.*]] = call { double, double } @__muldc3(double noundef [[B_REAL7]], double noundef [[B_IMAG9]], double noundef [[A_REAL11]], double noundef [[A_IMAG13]]) #[[ATTR4]] +// CHECK-NEXT: [[TMP2:%.*]] = extractvalue { double, double } [[CALL24]], 0 +// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { double, double } [[CALL24]], 1 +// CHECK-NEXT: br label %[[COMPLEX_MUL_CONT25]] +// CHECK: [[COMPLEX_MUL_CONT25]]: +// CHECK-NEXT: [[REAL_MUL_PHI26:%.*]] = phi double [ [[MUL_R18]], %[[COMPLEX_MUL_CONT]] ], [ [[MUL_R18]], %[[COMPLEX_MUL_IMAG_NAN21]] ], [ [[TMP2]], %[[COMPLEX_MUL_LIBCALL23]] ] +// CHECK-NEXT: [[IMAG_MUL_PHI27:%.*]] = phi double [ [[MUL_I19]], %[[COMPLEX_MUL_CONT]] ], [ [[MUL_I19]], %[[COMPLEX_MUL_IMAG_NAN21]] ], [ [[TMP3]], %[[COMPLEX_MUL_LIBCALL23]] ] +// CHECK-NEXT: [[CMP_R:%.*]] = fcmp une double [[REAL_MUL_PHI]], [[REAL_MUL_PHI26]] +// CHECK-NEXT: [[CMP_I:%.*]] = fcmp une double [[IMAG_MUL_PHI]], [[IMAG_MUL_PHI27]] +// CHECK-NEXT: [[OR_RI:%.*]] = or i1 [[CMP_R]], [[CMP_I]] +// CHECK-NEXT: [[CONV:%.*]] = zext i1 [[OR_RI]] to 
i32 +// CHECK-NEXT: ret i32 [[CONV]] +// int main(void) { double _Complex a = 5; @@ -12,6 +88,36 @@ _Complex double bar(int); void test(_Complex double*); void takecomplex(_Complex double); +// CHECK-LABEL: define dso_local void @test2( +// CHECK-SAME: i32 noundef [[C:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[C_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[X:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: [[COERCE:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 +// CHECK-NEXT: [[CALL:%.*]] = call { double, double } @bar(i32 noundef 1) +// CHECK-NEXT: [[TMP0:%.*]] = extractvalue { double, double } [[CALL]], 0 +// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { double, double } [[CALL]], 1 +// CHECK-NEXT: [[X_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[X]], i32 0, i32 0 +// CHECK-NEXT: [[X_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[X]], i32 0, i32 1 +// CHECK-NEXT: store double [[TMP0]], ptr [[X_REALP]], align 8 +// CHECK-NEXT: store double [[TMP1]], ptr [[X_IMAGP]], align 8 +// CHECK-NEXT: call void @test(ptr noundef [[X]]) +// CHECK-NEXT: [[X_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[X]], i32 0, i32 0 +// CHECK-NEXT: [[X_REAL:%.*]] = load double, ptr [[X_REALP1]], align 8 +// CHECK-NEXT: [[X_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[X]], i32 0, i32 1 +// CHECK-NEXT: [[X_IMAG:%.*]] = load double, ptr [[X_IMAGP2]], align 8 +// CHECK-NEXT: [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[COERCE]], i32 0, i32 0 +// CHECK-NEXT: [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[COERCE]], i32 0, i32 1 +// CHECK-NEXT: store double [[X_REAL]], ptr [[COERCE_REALP]], align 8 +// CHECK-NEXT: store double [[X_IMAG]], ptr [[COERCE_IMAGP]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[COERCE]], i32 
0, i32 0 +// CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[TMP2]], align 8 +// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[COERCE]], i32 0, i32 1 +// CHECK-NEXT: [[TMP5:%.*]] = load double, ptr [[TMP4]], align 8 +// CHECK-NEXT: call void @takecomplex(double noundef [[TMP3]], double noundef [[TMP5]]) +// CHECK-NEXT: ret void +// void test2(int c) { _Complex double X; X = bar(1); @@ -23,6 +129,104 @@ _Complex double g1, g2; _Complex float cf; double D; +// CHECK-LABEL: define dso_local void @test3( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*]]: +// CHECK-NEXT: [[GR:%.*]] = alloca double, align 8 +// CHECK-NEXT: [[G1_REAL:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G2_REAL:%.*]] = load double, ptr @g2, align 8 +// CHECK-NEXT: [[G2_IMAG:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g2, i32 0, i32 1), align 8 +// CHECK-NEXT: [[ADD_R:%.*]] = fadd double [[G1_REAL]], [[G2_REAL]] +// CHECK-NEXT: [[ADD_I:%.*]] = fadd double [[G1_IMAG]], [[G2_IMAG]] +// CHECK-NEXT: store double [[ADD_R]], ptr @g1, align 8 +// CHECK-NEXT: store double [[ADD_I]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL1:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG2:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G2_REAL3:%.*]] = load double, ptr @g2, align 8 +// CHECK-NEXT: [[G2_IMAG4:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g2, i32 0, i32 1), align 8 +// CHECK-NEXT: [[SUB_R:%.*]] = fsub double [[G1_REAL1]], [[G2_REAL3]] +// CHECK-NEXT: [[SUB_I:%.*]] = fsub double [[G1_IMAG2]], [[G2_IMAG4]] +// CHECK-NEXT: store double [[SUB_R]], ptr @g1, align 8 +// CHECK-NEXT: store double [[SUB_I]], ptr 
getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL5:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG6:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G2_REAL7:%.*]] = load double, ptr @g2, align 8 +// CHECK-NEXT: [[G2_IMAG8:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g2, i32 0, i32 1), align 8 +// CHECK-NEXT: [[MUL_AC:%.*]] = fmul double [[G1_REAL5]], [[G2_REAL7]] +// CHECK-NEXT: [[MUL_BD:%.*]] = fmul double [[G1_IMAG6]], [[G2_IMAG8]] +// CHECK-NEXT: [[MUL_AD:%.*]] = fmul double [[G1_REAL5]], [[G2_IMAG8]] +// CHECK-NEXT: [[MUL_BC:%.*]] = fmul double [[G1_IMAG6]], [[G2_REAL7]] +// CHECK-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]] +// CHECK-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]] +// CHECK-NEXT: [[ISNAN_CMP:%.*]] = fcmp uno double [[MUL_R]], [[MUL_R]] +// CHECK-NEXT: br i1 [[ISNAN_CMP]], label %[[COMPLEX_MUL_IMAG_NAN:.*]], label %[[COMPLEX_MUL_CONT:.*]], !prof [[PROF2]] +// CHECK: [[COMPLEX_MUL_IMAG_NAN]]: +// CHECK-NEXT: [[ISNAN_CMP9:%.*]] = fcmp uno double [[MUL_I]], [[MUL_I]] +// CHECK-NEXT: br i1 [[ISNAN_CMP9]], label %[[COMPLEX_MUL_LIBCALL:.*]], label %[[COMPLEX_MUL_CONT]], !prof [[PROF2]] +// CHECK: [[COMPLEX_MUL_LIBCALL]]: +// CHECK-NEXT: [[CALL:%.*]] = call { double, double } @__muldc3(double noundef [[G1_REAL5]], double noundef [[G1_IMAG6]], double noundef [[G2_REAL7]], double noundef [[G2_IMAG8]]) #[[ATTR4]] +// CHECK-NEXT: [[TMP0:%.*]] = extractvalue { double, double } [[CALL]], 0 +// CHECK-NEXT: [[TMP1:%.*]] = extractvalue { double, double } [[CALL]], 1 +// CHECK-NEXT: br label %[[COMPLEX_MUL_CONT]] +// CHECK: [[COMPLEX_MUL_CONT]]: +// CHECK-NEXT: [[REAL_MUL_PHI:%.*]] = phi double [ [[MUL_R]], %[[ENTRY]] ], [ [[MUL_R]], %[[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP0]], %[[COMPLEX_MUL_LIBCALL]] ] +// CHECK-NEXT: [[IMAG_MUL_PHI:%.*]] = phi double [ [[MUL_I]], 
%[[ENTRY]] ], [ [[MUL_I]], %[[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP1]], %[[COMPLEX_MUL_LIBCALL]] ] +// CHECK-NEXT: store double [[REAL_MUL_PHI]], ptr @g1, align 8 +// CHECK-NEXT: store double [[IMAG_MUL_PHI]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL10:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG11:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[CONJ_I:%.*]] = fneg double [[G1_IMAG11]] +// CHECK-NEXT: [[NEG_R:%.*]] = fneg double [[G1_REAL10]] +// CHECK-NEXT: [[NEG_I:%.*]] = fneg double [[CONJ_I]] +// CHECK-NEXT: store double [[NEG_R]], ptr @g1, align 8 +// CHECK-NEXT: store double [[NEG_I]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: store double [[TMP2]], ptr [[GR]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = load double, ptr @D, align 8 +// CHECK-NEXT: [[CF_REAL:%.*]] = load float, ptr @cf, align 4 +// CHECK-NEXT: [[CF_IMAG:%.*]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CONV:%.*]] = fpext float [[CF_REAL]] to double +// CHECK-NEXT: [[CONV12:%.*]] = fpext float [[CF_IMAG]] to double +// CHECK-NEXT: [[ADD_R13:%.*]] = fadd double [[CONV]], [[TMP3]] +// CHECK-NEXT: [[CONV14:%.*]] = fptrunc double [[ADD_R13]] to float +// CHECK-NEXT: [[CONV15:%.*]] = fptrunc double [[CONV12]] to float +// CHECK-NEXT: store float [[CONV14]], ptr @cf, align 4 +// CHECK-NEXT: store float [[CONV15]], ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CF_REAL16:%.*]] = load float, ptr @cf, align 4 +// CHECK-NEXT: [[CF_IMAG17:%.*]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CONV18:%.*]] = fpext float [[CF_REAL16]] to double +// 
CHECK-NEXT: [[CONV19:%.*]] = fpext float [[CF_IMAG17]] to double +// CHECK-NEXT: [[TMP4:%.*]] = load double, ptr @D, align 8 +// CHECK-NEXT: [[ADD_R20:%.*]] = fadd double [[TMP4]], [[CONV18]] +// CHECK-NEXT: store double [[ADD_R20]], ptr @D, align 8 +// CHECK-NEXT: [[G1_REAL21:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG22:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[CF_REAL23:%.*]] = load float, ptr @cf, align 4 +// CHECK-NEXT: [[CF_IMAG24:%.*]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CONV25:%.*]] = fpext float [[CF_REAL23]] to double +// CHECK-NEXT: [[CONV26:%.*]] = fpext float [[CF_IMAG24]] to double +// CHECK-NEXT: [[CALL27:%.*]] = call { double, double } @__divdc3(double noundef [[CONV25]], double noundef [[CONV26]], double noundef [[G1_REAL21]], double noundef [[G1_IMAG22]]) #[[ATTR4]] +// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { double, double } [[CALL27]], 0 +// CHECK-NEXT: [[TMP6:%.*]] = extractvalue { double, double } [[CALL27]], 1 +// CHECK-NEXT: [[CONV28:%.*]] = fptrunc double [[TMP5]] to float +// CHECK-NEXT: [[CONV29:%.*]] = fptrunc double [[TMP6]] to float +// CHECK-NEXT: store float [[CONV28]], ptr @cf, align 4 +// CHECK-NEXT: store float [[CONV29]], ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[G1_REAL30:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG31:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[TMP7:%.*]] = load double, ptr @D, align 8 +// CHECK-NEXT: [[ADD_R32:%.*]] = fadd double [[G1_REAL30]], [[TMP7]] +// CHECK-NEXT: store double [[ADD_R32]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG31]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[TMP8:%.*]] = load double, 
ptr @D, align 8 +// CHECK-NEXT: [[G1_REAL33:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG34:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[ADD_R35:%.*]] = fadd double [[TMP8]], [[G1_REAL33]] +// CHECK-NEXT: store double [[ADD_R35]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG34]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: ret void +// void test3(void) { g1 = g1 + g2; g1 = g1 - g2; @@ -41,6 +245,101 @@ void test3(void) { __complex__ int ci1, ci2; __complex__ short cs; int i; +// CHECK-LABEL: define dso_local void @test3int( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[CI1_REAL:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI2_REAL:%.*]] = load i32, ptr @ci2, align 4 +// CHECK-NEXT: [[CI2_IMAG:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci2, i32 0, i32 1), align 4 +// CHECK-NEXT: [[ADD_R:%.*]] = add i32 [[CI1_REAL]], [[CI2_REAL]] +// CHECK-NEXT: [[ADD_I:%.*]] = add i32 [[CI1_IMAG]], [[CI2_IMAG]] +// CHECK-NEXT: store i32 [[ADD_R]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[ADD_I]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL1:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG2:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI2_REAL3:%.*]] = load i32, ptr @ci2, align 4 +// CHECK-NEXT: [[CI2_IMAG4:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci2, i32 0, i32 1), align 4 +// CHECK-NEXT: [[SUB_R:%.*]] = sub i32 [[CI1_REAL1]], [[CI2_REAL3]] +// CHECK-NEXT: [[SUB_I:%.*]] = sub i32 [[CI1_IMAG2]], [[CI2_IMAG4]] +// CHECK-NEXT: store i32 [[SUB_R]], ptr @ci1, 
align 4 +// CHECK-NEXT: store i32 [[SUB_I]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL5:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG6:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI2_REAL7:%.*]] = load i32, ptr @ci2, align 4 +// CHECK-NEXT: [[CI2_IMAG8:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci2, i32 0, i32 1), align 4 +// CHECK-NEXT: [[MUL_RL:%.*]] = mul i32 [[CI1_REAL5]], [[CI2_REAL7]] +// CHECK-NEXT: [[MUL_RR:%.*]] = mul i32 [[CI1_IMAG6]], [[CI2_IMAG8]] +// CHECK-NEXT: [[MUL_R:%.*]] = sub i32 [[MUL_RL]], [[MUL_RR]] +// CHECK-NEXT: [[MUL_IL:%.*]] = mul i32 [[CI1_IMAG6]], [[CI2_REAL7]] +// CHECK-NEXT: [[MUL_IR:%.*]] = mul i32 [[CI1_REAL5]], [[CI2_IMAG8]] +// CHECK-NEXT: [[MUL_I:%.*]] = add i32 [[MUL_IL]], [[MUL_IR]] +// CHECK-NEXT: store i32 [[MUL_R]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[MUL_I]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL9:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG10:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CONJ_I:%.*]] = sub i32 0, [[CI1_IMAG10]] +// CHECK-NEXT: [[NEG_R:%.*]] = sub i32 0, [[CI1_REAL9]] +// CHECK-NEXT: [[NEG_I:%.*]] = sub i32 0, [[CONJ_I]] +// CHECK-NEXT: store i32 [[NEG_R]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[NEG_I]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[TMP0]], ptr @i, align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @i, align 4 +// CHECK-NEXT: [[CS_REAL:%.*]] = load i16, ptr @cs, align 2 +// CHECK-NEXT: [[CS_IMAG:%.*]] = load i16, ptr getelementptr inbounds nuw ({ i16, i16 }, ptr @cs, i32 0, i32 1), align 2 +// CHECK-NEXT: [[CONV:%.*]] = sext 
i16 [[CS_REAL]] to i32 +// CHECK-NEXT: [[CONV11:%.*]] = sext i16 [[CS_IMAG]] to i32 +// CHECK-NEXT: [[ADD_R12:%.*]] = add i32 [[CONV]], [[TMP1]] +// CHECK-NEXT: [[ADD_I13:%.*]] = add i32 [[CONV11]], 0 +// CHECK-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD_R12]] to i16 +// CHECK-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD_I13]] to i16 +// CHECK-NEXT: store i16 [[CONV14]], ptr @cs, align 2 +// CHECK-NEXT: store i16 [[CONV15]], ptr getelementptr inbounds nuw ({ i16, i16 }, ptr @cs, i32 0, i32 1), align 2 +// CHECK-NEXT: [[CF_REAL:%.*]] = load float, ptr @cf, align 4 +// CHECK-NEXT: [[CF_IMAG:%.*]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CONV16:%.*]] = fpext float [[CF_REAL]] to double +// CHECK-NEXT: [[CONV17:%.*]] = fpext float [[CF_IMAG]] to double +// CHECK-NEXT: [[TMP2:%.*]] = load double, ptr @D, align 8 +// CHECK-NEXT: [[ADD_R18:%.*]] = fadd double [[TMP2]], [[CONV16]] +// CHECK-NEXT: store double [[ADD_R18]], ptr @D, align 8 +// CHECK-NEXT: [[CI1_REAL19:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG20:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CS_REAL21:%.*]] = load i16, ptr @cs, align 2 +// CHECK-NEXT: [[CS_IMAG22:%.*]] = load i16, ptr getelementptr inbounds nuw ({ i16, i16 }, ptr @cs, i32 0, i32 1), align 2 +// CHECK-NEXT: [[CONV23:%.*]] = sext i16 [[CS_REAL21]] to i32 +// CHECK-NEXT: [[CONV24:%.*]] = sext i16 [[CS_IMAG22]] to i32 +// CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[CONV23]], [[CI1_REAL19]] +// CHECK-NEXT: [[TMP4:%.*]] = mul i32 [[CONV24]], [[CI1_IMAG20]] +// CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP3]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[CI1_REAL19]], [[CI1_REAL19]] +// CHECK-NEXT: [[TMP7:%.*]] = mul i32 [[CI1_IMAG20]], [[CI1_IMAG20]] +// CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP6]], [[TMP7]] +// CHECK-NEXT: [[TMP9:%.*]] = mul i32 [[CONV24]], [[CI1_REAL19]] +// CHECK-NEXT: [[TMP10:%.*]] = mul 
i32 [[CONV23]], [[CI1_IMAG20]] +// CHECK-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], [[TMP10]] +// CHECK-NEXT: [[TMP12:%.*]] = sdiv i32 [[TMP5]], [[TMP8]] +// CHECK-NEXT: [[TMP13:%.*]] = sdiv i32 [[TMP11]], [[TMP8]] +// CHECK-NEXT: [[CONV25:%.*]] = trunc i32 [[TMP12]] to i16 +// CHECK-NEXT: [[CONV26:%.*]] = trunc i32 [[TMP13]] to i16 +// CHECK-NEXT: store i16 [[CONV25]], ptr @cs, align 2 +// CHECK-NEXT: store i16 [[CONV26]], ptr getelementptr inbounds nuw ({ i16, i16 }, ptr @cs, i32 0, i32 1), align 2 +// CHECK-NEXT: [[CI1_REAL27:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG28:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr @i, align 4 +// CHECK-NEXT: [[ADD_R29:%.*]] = add i32 [[CI1_REAL27]], [[TMP14]] +// CHECK-NEXT: [[ADD_I30:%.*]] = add i32 [[CI1_IMAG28]], 0 +// CHECK-NEXT: store i32 [[ADD_R29]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[ADD_I30]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr @i, align 4 +// CHECK-NEXT: [[CI1_REAL31:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG32:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[ADD_R33:%.*]] = add i32 [[TMP15]], [[CI1_REAL31]] +// CHECK-NEXT: [[ADD_I34:%.*]] = add i32 0, [[CI1_IMAG32]] +// CHECK-NEXT: store i32 [[ADD_R33]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[ADD_I34]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: ret void +// void test3int(void) { ci1 = ci1 + ci2; ci1 = ci1 - ci2; @@ -56,15 +355,37 @@ void test3int(void) { ci1 = i + ci1; } +// CHECK-LABEL: define dso_local void @t1( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: store float 4.000000e+00, ptr @cf, align 4 +// CHECK-NEXT: ret void +// void t1(void) { (__real__ cf) = 4.0; } +// 
CHECK-LABEL: define dso_local void @t2( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: store float 4.000000e+00, ptr getelementptr inbounds nuw ({ float, float }, ptr @cf, i32 0, i32 1), align 4 +// CHECK-NEXT: ret void +// void t2(void) { (__imag__ cf) = 4.0; } // PR1960 +// CHECK-LABEL: define dso_local void @t3( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[V:%.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: [[V_REALP:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[V]], i32 0, i32 0 +// CHECK-NEXT: [[V_IMAGP:%.*]] = getelementptr inbounds nuw { i64, i64 }, ptr [[V]], i32 0, i32 1 +// CHECK-NEXT: store i64 2, ptr [[V_REALP]], align 8 +// CHECK-NEXT: store i64 0, ptr [[V_IMAGP]], align 8 +// CHECK-NEXT: ret void +// void t3(void) { __complex__ long long v = 2; } @@ -72,10 +393,72 @@ void t3(void) { // PR3131 float _Complex t4(void); +// CHECK-LABEL: define dso_local void @t5( +// CHECK-SAME: ) #[[ATTR2:[0-9]+]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[X:%.*]] = alloca { float, float }, align 4 +// CHECK-NEXT: [[COERCE:%.*]] = alloca { float, float }, align 4 +// CHECK-NEXT: [[CALL:%.*]] = call <2 x float> @t4() +// CHECK-NEXT: store <2 x float> [[CALL]], ptr [[COERCE]], align 4 +// CHECK-NEXT: [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0 +// CHECK-NEXT: [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4 +// CHECK-NEXT: [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1 +// CHECK-NEXT: [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4 +// CHECK-NEXT: [[X_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[X]], i32 0, i32 0 +// CHECK-NEXT: [[X_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[X]], i32 0, i32 1 +// CHECK-NEXT: store float [[COERCE_REAL]], ptr [[X_REALP]], align 4 +// CHECK-NEXT: store float [[COERCE_IMAG]], ptr 
[[X_IMAGP]], align 4 +// CHECK-NEXT: ret void +// void t5(void) { float _Complex x = t4(); } +// CHECK-LABEL: define dso_local void @t6( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[G1_REAL:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[INC:%.*]] = fadd double [[G1_REAL]], 1.000000e+00 +// CHECK-NEXT: store double [[INC]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL1:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG2:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[DEC:%.*]] = fadd double [[G1_REAL1]], -1.000000e+00 +// CHECK-NEXT: store double [[DEC]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG2]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL3:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG4:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[INC5:%.*]] = fadd double [[G1_REAL3]], 1.000000e+00 +// CHECK-NEXT: store double [[INC5]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG4]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[G1_REAL6:%.*]] = load double, ptr @g1, align 8 +// CHECK-NEXT: [[G1_IMAG7:%.*]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// CHECK-NEXT: [[DEC8:%.*]] = fadd double [[G1_REAL6]], -1.000000e+00 +// CHECK-NEXT: store double [[DEC8]], ptr @g1, align 8 +// CHECK-NEXT: store double [[G1_IMAG7]], ptr getelementptr inbounds nuw ({ double, double }, ptr @g1, i32 0, i32 1), align 8 +// 
CHECK-NEXT: [[CI1_REAL:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[INC9:%.*]] = add i32 [[CI1_REAL]], 1 +// CHECK-NEXT: store i32 [[INC9]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[CI1_IMAG]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL10:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG11:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[DEC12:%.*]] = add i32 [[CI1_REAL10]], -1 +// CHECK-NEXT: store i32 [[DEC12]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[CI1_IMAG11]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL13:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG14:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[INC15:%.*]] = add i32 [[CI1_REAL13]], 1 +// CHECK-NEXT: store i32 [[INC15]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[CI1_IMAG14]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[CI1_REAL16:%.*]] = load i32, ptr @ci1, align 4 +// CHECK-NEXT: [[CI1_IMAG17:%.*]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: [[DEC18:%.*]] = add i32 [[CI1_REAL16]], -1 +// CHECK-NEXT: store i32 [[DEC18]], ptr @ci1, align 4 +// CHECK-NEXT: store i32 [[CI1_IMAG17]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @ci1, i32 0, i32 1), align 4 +// CHECK-NEXT: ret void +// void t6(void) { g1++; g1--; @@ -87,18 +470,68 @@ void t6(void) { --ci1; } +// CHECK-LABEL: define dso_local double @t7( +// CHECK-SAME: double noundef [[C_COERCE0:%.*]], double noundef [[C_COERCE1:%.*]]) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[C:%.*]] = alloca 
{ double, double }, align 8 +// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[C]], i32 0, i32 0 +// CHECK-NEXT: store double [[C_COERCE0]], ptr [[TMP0]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[C]], i32 0, i32 1 +// CHECK-NEXT: store double [[C_COERCE1]], ptr [[TMP1]], align 8 +// CHECK-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[C]], i32 0, i32 0 +// CHECK-NEXT: [[TMP2:%.*]] = load double, ptr [[C_REALP]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = call double @llvm.fabs.f64(double [[TMP2]]) +// CHECK-NEXT: ret double [[TMP3]] +// double t7(double _Complex c) { return __builtin_fabs(__real__(c)); } +// CHECK-LABEL: define dso_local void @t8( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[X:%.*]] = alloca ptr, align 8 +// CHECK-NEXT: [[DOTCOMPOUNDLITERAL:%.*]] = alloca { i32, i32 }, align 4 +// CHECK-NEXT: [[DOTCOMPOUNDLITERAL_REALP:%.*]] = getelementptr inbounds nuw { i32, i32 }, ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 0 +// CHECK-NEXT: [[DOTCOMPOUNDLITERAL_IMAGP:%.*]] = getelementptr inbounds nuw { i32, i32 }, ptr [[DOTCOMPOUNDLITERAL]], i32 0, i32 1 +// CHECK-NEXT: store i32 1, ptr [[DOTCOMPOUNDLITERAL_REALP]], align 4 +// CHECK-NEXT: store i32 0, ptr [[DOTCOMPOUNDLITERAL_IMAGP]], align 4 +// CHECK-NEXT: store ptr [[DOTCOMPOUNDLITERAL]], ptr [[X]], align 8 +// CHECK-NEXT: ret void +// void t8(void) { __complex__ int *x = &(__complex__ int){1}; } const _Complex double test9const = 0; +// CHECK-LABEL: define dso_local { double, double } @test9func( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8 +// CHECK-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0 +// CHECK-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1 +// CHECK-NEXT: 
store double 0.000000e+00, ptr [[RETVAL_REALP]], align 8 +// CHECK-NEXT: store double 0.000000e+00, ptr [[RETVAL_IMAGP]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load { double, double }, ptr [[RETVAL]], align 8 +// CHECK-NEXT: ret { double, double } [[TMP0]] +// _Complex double test9func(void) { return test9const; } // D6217 +// CHECK-LABEL: define dso_local void @t91( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[C:%.*]] = alloca [0 x i8], align 1 +// CHECK-NEXT: br i1 false, label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK: [[COND_TRUE]]: +// CHECK-NEXT: br label %[[COND_END:.*]] +// CHECK: [[COND_FALSE]]: +// CHECK-NEXT: br label %[[COND_END]] +// CHECK: [[COND_END]]: +// CHECK-NEXT: [[COND_R:%.*]] = phi double [ 2.000000e+00, %[[COND_TRUE]] ], [ 2.000000e+00, %[[COND_FALSE]] ] +// CHECK-NEXT: [[COND_I:%.*]] = phi double [ 0.000000e+00, %[[COND_TRUE]] ], [ 0.000000e+00, %[[COND_FALSE]] ] +// CHECK-NEXT: ret void +// void t91(void) { // Check for proper type promotion of conditional expression char c[(int)(sizeof(typeof((0 ? 2.0f : (_Complex double) 2.0f))) - sizeof(_Complex double))]; @@ -106,6 +539,20 @@ void t91(void) { (0 ? 2.0f : (_Complex double) 2.0f); } +// CHECK-LABEL: define dso_local void @t92( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[C:%.*]] = alloca [0 x i8], align 1 +// CHECK-NEXT: br i1 false, label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK: [[COND_TRUE]]: +// CHECK-NEXT: br label %[[COND_END:.*]] +// CHECK: [[COND_FALSE]]: +// CHECK-NEXT: br label %[[COND_END]] +// CHECK: [[COND_END]]: +// CHECK-NEXT: [[COND_R:%.*]] = phi double [ 2.000000e+00, %[[COND_TRUE]] ], [ 2.000000e+00, %[[COND_FALSE]] ] +// CHECK-NEXT: [[COND_I:%.*]] = phi double [ 0.000000e+00, %[[COND_TRUE]] ], [ 0.000000e+00, %[[COND_FALSE]] ] +// CHECK-NEXT: ret void +// void t92(void) { // Check for proper type promotion of conditional expression char c[(int)(sizeof(typeof((0 ? 
(_Complex double) 2.0f : 2.0f))) - sizeof(_Complex double))]; @@ -113,3 +560,36 @@ void t92(void) { (0 ? (_Complex double) 2.0f : 2.0f); } +// CHECK-LABEL: define dso_local void @real_on_scalar_with_type_promotion( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2 +// CHECK-NEXT: [[B:%.*]] = alloca half, align 2 +// CHECK-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 0 +// CHECK-NEXT: [[A_REAL:%.*]] = load half, ptr [[A_REALP]], align 2 +// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_REAL]] to float +// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[EXT]] to half +// CHECK-NEXT: store half [[UNPROMOTION]], ptr [[B]], align 2 +// CHECK-NEXT: ret void +// +void real_on_scalar_with_type_promotion() { + _Float16 _Complex a; + _Float16 b = __real__(__real__ a); +} + +// CHECK-LABEL: define dso_local void @imag_on_scalar_with_type_promotion( +// CHECK-SAME: ) #[[ATTR0]] { +// CHECK-NEXT: [[ENTRY:.*:]] +// CHECK-NEXT: [[A:%.*]] = alloca { half, half }, align 2 +// CHECK-NEXT: [[B:%.*]] = alloca half, align 2 +// CHECK-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { half, half }, ptr [[A]], i32 0, i32 1 +// CHECK-NEXT: [[A_IMAG:%.*]] = load half, ptr [[A_IMAGP]], align 2 +// CHECK-NEXT: [[EXT:%.*]] = fpext half [[A_IMAG]] to float +// CHECK-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[EXT]] to half +// CHECK-NEXT: store half [[UNPROMOTION]], ptr [[B]], align 2 +// CHECK-NEXT: ret void +// +void imag_on_scalar_with_type_promotion() { + _Float16 _Complex a; + _Float16 b = __real__(__imag__ a); +} diff --git a/clang/test/DebugInfo/KeyInstructions/atomic.c b/clang/test/DebugInfo/KeyInstructions/atomic.c index c25f4385843b9..37355d0f6edb6 100644 --- a/clang/test/DebugInfo/KeyInstructions/atomic.c +++ b/clang/test/DebugInfo/KeyInstructions/atomic.c @@ -29,79 +29,80 @@ void fun() { int r3 = __atomic_test_and_set(&x, __ATOMIC_RELAXED); // CHECK-NEXT: %6 = atomicrmw 
xchg ptr @x, i8 1 monotonic, align 4, !dbg [[LINE30:!.*]] -// CHECK-NEXT: %tobool = icmp ne i8 %6, 0, !dbg [[LINE30_G7R2:!.*]] -// CHECK-NEXT: store i1 %tobool, ptr %atomic-temp3, align 1, !dbg [[LINE30_G7R1:!.*]] +// CHECK-NEXT: %tobool = icmp ne i8 %6, 0, !dbg [[LINE30_G7R3:!.*]] +// CHECK-NEXT: %storedv = zext i1 %tobool to i8, !dbg [[LINE30_G7R2:!.*]] +// CHECK-NEXT: store i8 %storedv, ptr %atomic-temp3, align 1, !dbg [[LINE30_G7R1:!.*]] // CHECK-NEXT: %7 = load i8, ptr %atomic-temp3, align 1, !dbg [[LINE30_G6R4:!.*]] // CHECK-NEXT: %loadedv = trunc i8 %7 to i1, !dbg [[LINE30_G6R3:!.*]] // CHECK-NEXT: %conv = zext i1 %loadedv to i32, !dbg [[LINE30_G6R2:!.*]] // CHECK-NEXT: store i32 %conv, ptr %r3, align 4, !dbg [[LINE30_G6R1:!.*]] __atomic_clear(&x, __ATOMIC_RELAXED); -// CHECK-NEXT: store atomic i8 0, ptr @x monotonic, align 4, !dbg [[LINE39_G8R1:!.*]] +// CHECK-NEXT: store atomic i8 0, ptr @x monotonic, align 4, !dbg [[LINE40_G8R1:!.*]] int r4 = __c11_atomic_exchange(&x, 2,__ATOMIC_RELAXED); -// CHECK-NEXT: store i32 2, ptr %.atomictmp4, align 4, !dbg [[LINE42_G10R1:!.*]] -// CHECK-NEXT: %8 = load i32, ptr %.atomictmp4, align 4, !dbg [[LINE42:!.*]] -// CHECK-NEXT: %9 = atomicrmw xchg ptr @x, i32 %8 monotonic, align 4, !dbg [[LINE42_G10R2:!.*]] -// CHECK-NEXT: store i32 %9, ptr %atomic-temp5, align 4, !dbg [[LINE42_G10R1:!.*]] -// CHECK-NEXT: %10 = load i32, ptr %atomic-temp5, align 4, !dbg [[LINE42_G9R2:!.*]] -// CHECK-NEXT: store i32 %10, ptr %r4, align 4, !dbg [[LINE42_G9R1:!.*]] +// CHECK-NEXT: store i32 2, ptr %.atomictmp4, align 4, !dbg [[LINE43_G10R1:!.*]] +// CHECK-NEXT: %8 = load i32, ptr %.atomictmp4, align 4, !dbg [[LINE43:!.*]] +// CHECK-NEXT: %9 = atomicrmw xchg ptr @x, i32 %8 monotonic, align 4, !dbg [[LINE43_G10R2:!.*]] +// CHECK-NEXT: store i32 %9, ptr %atomic-temp5, align 4, !dbg [[LINE43_G10R1:!.*]] +// CHECK-NEXT: %10 = load i32, ptr %atomic-temp5, align 4, !dbg [[LINE43_G9R2:!.*]] +// CHECK-NEXT: store i32 %10, ptr %r4, align 4, !dbg 
[[LINE43_G9R1:!.*]] int r5 = __atomic_compare_exchange(&y, &y, &y, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); -// CHECK-NEXT: %11 = load i32, ptr @y, align 4, !dbg [[LINE50:!.*]] -// CHECK-NEXT: %12 = load i32, ptr @y, align 4, !dbg [[LINE50]] -// CHECK-NEXT: %13 = cmpxchg ptr @y, i32 %11, i32 %12 monotonic monotonic, align 4, !dbg [[LINE50]] -// CHECK-NEXT: %14 = extractvalue { i32, i1 } %13, 0, !dbg [[LINE50_G12R2:!.*]] -// CHECK-NEXT: %15 = extractvalue { i32, i1 } %13, 1, !dbg [[LINE50_G12R3:!.*]] -// CHECK-NEXT: br i1 %15, label %cmpxchg.continue, label %cmpxchg.store_expected, !dbg [[LINE50]] +// CHECK-NEXT: %11 = load i32, ptr @y, align 4, !dbg [[LINE51:!.*]] +// CHECK-NEXT: %12 = load i32, ptr @y, align 4, !dbg [[LINE51]] +// CHECK-NEXT: %13 = cmpxchg ptr @y, i32 %11, i32 %12 monotonic monotonic, align 4, !dbg [[LINE51]] +// CHECK-NEXT: %14 = extractvalue { i32, i1 } %13, 0, !dbg [[LINE51_G12R2:!.*]] +// CHECK-NEXT: %15 = extractvalue { i32, i1 } %13, 1, !dbg [[LINE51_G12R3:!.*]] +// CHECK-NEXT: br i1 %15, label %cmpxchg.continue, label %cmpxchg.store_expected, !dbg [[LINE51]] // CHECK: cmpxchg.store_expected: -// CHECK-NEXT: store i32 %14, ptr @y, align 4, !dbg [[LINE50_G12R1:!.*]] -// CHECK-NEXT: br label %cmpxchg.continue, !dbg [[LINE50]] +// CHECK-NEXT: store i32 %14, ptr @y, align 4, !dbg [[LINE51_G12R1:!.*]] +// CHECK-NEXT: br label %cmpxchg.continue, !dbg [[LINE51]] // CHECK: cmpxchg.continue: -// CHECK-NEXT: %storedv = zext i1 %15 to i8, !dbg [[LINE50_G12R2]] -// CHECK-NEXT: store i8 %storedv, ptr %cmpxchg.bool, align 1, !dbg [[LINE50_G12R1]] -// CHECK-NEXT: %16 = load i8, ptr %cmpxchg.bool, align 1, !dbg [[LINE50_G11R4:!.*]] -// CHECK-NEXT: %loadedv6 = trunc i8 %16 to i1, !dbg [[LINE50_G11R3:!.*]] -// CHECK-NEXT: %conv7 = zext i1 %loadedv6 to i32, !dbg [[LINE50_G11R2:!.*]] -// CHECK-NEXT: store i32 %conv7, ptr %r5, align 4, !dbg [[LINE50_G11R1:!.*]] +// CHECK-NEXT: %storedv6 = zext i1 %15 to i8, !dbg [[LINE51_G12R2]] +// CHECK-NEXT: store i8 
%storedv6, ptr %cmpxchg.bool, align 1, !dbg [[LINE51_G12R1]] +// CHECK-NEXT: %16 = load i8, ptr %cmpxchg.bool, align 1, !dbg [[LINE51_G11R4:!.*]] +// CHECK-NEXT: %loadedv7 = trunc i8 %16 to i1, !dbg [[LINE51_G11R3:!.*]] +// CHECK-NEXT: %conv8 = zext i1 %loadedv7 to i32, !dbg [[LINE51_G11R2:!.*]] +// CHECK-NEXT: store i32 %conv8, ptr %r5, align 4, !dbg [[LINE51_G11R1:!.*]] int r6 = __c11_atomic_compare_exchange_strong(&x, &y, 42, __ATOMIC_RELAXED, __ATOMIC_RELAXED); -// CHECK-NEXT: store i32 42, ptr %.atomictmp8, align 4, !dbg [[LINE68_G14R1:!.*]] -// CHECK-NEXT: %17 = load i32, ptr @y, align 4, !dbg [[LINE68:!.*]] -// CHECK-NEXT: %18 = load i32, ptr %.atomictmp8, align 4, !dbg [[LINE68]] -// CHECK-NEXT: %19 = cmpxchg ptr @x, i32 %17, i32 %18 monotonic monotonic, align 4, !dbg [[LINE68]] -// CHECK-NEXT: %20 = extractvalue { i32, i1 } %19, 0, !dbg [[LINE68_G14R2:!.*]] -// CHECK-NEXT: %21 = extractvalue { i32, i1 } %19, 1, !dbg [[LINE68_G14R3:!.*]] -// CHECK-NEXT: br i1 %21, label %cmpxchg.continue11, label %cmpxchg.store_expected10, !dbg [[LINE68]] -// CHECK: cmpxchg.store_expected10: -// CHECK-NEXT: store i32 %20, ptr @y, align 4, !dbg [[LINE68_G14R1:!.*]] -// CHECK-NEXT: br label %cmpxchg.continue11, !dbg [[LINE68]] -// CHECK: cmpxchg.continue11: -// CHECK-NEXT: %storedv12 = zext i1 %21 to i8, !dbg [[LINE68_G14R2]] -// CHECK-NEXT: store i8 %storedv12, ptr %cmpxchg.bool9, align 1, !dbg [[LINE68_G14R1:!.*]] -// CHECK-NEXT: %22 = load i8, ptr %cmpxchg.bool9, align 1, !dbg [[LINE68_G13R4:!.*]] -// CHECK-NEXT: %loadedv13 = trunc i8 %22 to i1, !dbg [[LINE68_G13R3:!.*]] -// CHECK-NEXT: %conv14 = zext i1 %loadedv13 to i32, !dbg [[LINE68_G13R2:!.*]] -// CHECK-NEXT: store i32 %conv14, ptr %r6, align 4, !dbg [[LINE68_G13R1:!.*]] +// CHECK-NEXT: store i32 42, ptr %.atomictmp9, align 4, !dbg [[LINE69_G14R1:!.*]] +// CHECK-NEXT: %17 = load i32, ptr @y, align 4, !dbg [[LINE69:!.*]] +// CHECK-NEXT: %18 = load i32, ptr %.atomictmp9, align 4, !dbg [[LINE69]] +// CHECK-NEXT: %19 = 
cmpxchg ptr @x, i32 %17, i32 %18 monotonic monotonic, align 4, !dbg [[LINE69]] +// CHECK-NEXT: %20 = extractvalue { i32, i1 } %19, 0, !dbg [[LINE69_G14R2:!.*]] +// CHECK-NEXT: %21 = extractvalue { i32, i1 } %19, 1, !dbg [[LINE69_G14R3:!.*]] +// CHECK-NEXT: br i1 %21, label %cmpxchg.continue12, label %cmpxchg.store_expected11, !dbg [[LINE69]] +// CHECK: cmpxchg.store_expected11: +// CHECK-NEXT: store i32 %20, ptr @y, align 4, !dbg [[LINE69_G14R1:!.*]] +// CHECK-NEXT: br label %cmpxchg.continue12, !dbg [[LINE69]] +// CHECK: cmpxchg.continue12: +// CHECK-NEXT: %storedv13 = zext i1 %21 to i8, !dbg [[LINE69_G14R2]] +// CHECK-NEXT: store i8 %storedv13, ptr %cmpxchg.bool10, align 1, !dbg [[LINE69_G14R1:!.*]] +// CHECK-NEXT: %22 = load i8, ptr %cmpxchg.bool10, align 1, !dbg [[LINE69_G13R4:!.*]] +// CHECK-NEXT: %loadedv14 = trunc i8 %22 to i1, !dbg [[LINE69_G13R3:!.*]] +// CHECK-NEXT: %conv15 = zext i1 %loadedv14 to i32, !dbg [[LINE69_G13R2:!.*]] +// CHECK-NEXT: store i32 %conv15, ptr %r6, align 4, !dbg [[LINE69_G13R1:!.*]] int r7 = __c11_atomic_compare_exchange_weak(&x, &y, 43, __ATOMIC_RELAXED, __ATOMIC_RELAXED); -// CHECK-NEXT: store i32 43, ptr %.atomictmp15, align 4, !dbg [[LINE87_G16R1:!.*]] -// CHECK-NEXT: %23 = load i32, ptr @y, align 4, !dbg [[LINE87:!.*]] -// CHECK-NEXT: %24 = load i32, ptr %.atomictmp15, align 4, !dbg [[LINE87]] -// CHECK-NEXT: %25 = cmpxchg weak ptr @x, i32 %23, i32 %24 monotonic monotonic, align 4, !dbg [[LINE87]] -// CHECK-NEXT: %26 = extractvalue { i32, i1 } %25, 0, !dbg [[LINE87_G16R2:!.*]] -// CHECK-NEXT: %27 = extractvalue { i32, i1 } %25, 1, !dbg [[LINE87_G16R3:!.*]] -// CHECK-NEXT: br i1 %27, label %cmpxchg.continue18, label %cmpxchg.store_expected17, !dbg [[LINE87]] -// CHECK: cmpxchg.store_expected17: -// CHECK-NEXT: store i32 %26, ptr @y, align 4, !dbg [[LINE87_G16R1]] -// CHECK-NEXT: br label %cmpxchg.continue18, !dbg [[LINE87]] -// CHECK: cmpxchg.continue18: -// CHECK-NEXT: %storedv19 = zext i1 %27 to i8, !dbg [[LINE87_G16R2]] -// 
CHECK-NEXT: store i8 %storedv19, ptr %cmpxchg.bool16, align 1, !dbg [[LINE87_G16R1]] -// CHECK-NEXT: %28 = load i8, ptr %cmpxchg.bool16, align 1, !dbg [[LINE87_G15R4:!.*]] -// CHECK-NEXT: %loadedv20 = trunc i8 %28 to i1, !dbg [[LINE87_G15R3:!.*]] -// CHECK-NEXT: %conv21 = zext i1 %loadedv20 to i32, !dbg [[LINE87_G15R2:!.*]] -// CHECK-NEXT: store i32 %conv21, ptr %r7, align 4, !dbg [[LINE87_G15R1:!.*]] +// CHECK-NEXT: store i32 43, ptr %.atomictmp16, align 4, !dbg [[LINE88_G16R1:!.*]] +// CHECK-NEXT: %23 = load i32, ptr @y, align 4, !dbg [[LINE88:!.*]] +// CHECK-NEXT: %24 = load i32, ptr %.atomictmp16, align 4, !dbg [[LINE88]] +// CHECK-NEXT: %25 = cmpxchg weak ptr @x, i32 %23, i32 %24 monotonic monotonic, align 4, !dbg [[LINE88]] +// CHECK-NEXT: %26 = extractvalue { i32, i1 } %25, 0, !dbg [[LINE88_G16R2:!.*]] +// CHECK-NEXT: %27 = extractvalue { i32, i1 } %25, 1, !dbg [[LINE88_G16R3:!.*]] +// CHECK-NEXT: br i1 %27, label %cmpxchg.continue19, label %cmpxchg.store_expected18, !dbg [[LINE88]] +// CHECK: cmpxchg.store_expected18: +// CHECK-NEXT: store i32 %26, ptr @y, align 4, !dbg [[LINE88_G16R1]] +// CHECK-NEXT: br label %cmpxchg.continue19, !dbg [[LINE88]] +// CHECK: cmpxchg.continue19: +// CHECK-NEXT: %storedv20 = zext i1 %27 to i8, !dbg [[LINE88_G16R2]] +// CHECK-NEXT: store i8 %storedv20, ptr %cmpxchg.bool17, align 1, !dbg [[LINE88_G16R1]] +// CHECK-NEXT: %28 = load i8, ptr %cmpxchg.bool17, align 1, !dbg [[LINE88_G15R4:!.*]] +// CHECK-NEXT: %loadedv21 = trunc i8 %28 to i1, !dbg [[LINE88_G15R3:!.*]] +// CHECK-NEXT: %conv22 = zext i1 %loadedv21 to i32, !dbg [[LINE88_G15R2:!.*]] +// CHECK-NEXT: store i32 %conv22, ptr %r7, align 4, !dbg [[LINE88_G15R1:!.*]] // CHECK: ret{{.*}}, !dbg [[RET:!.*]] } @@ -121,6 +122,7 @@ void fun() { // CHECK: [[LINE25_G5R2]] = !DILocation(line: 25, scope: ![[#]], atomGroup: 5, atomRank: 2) // CHECK: [[LINE30]] = !DILocation(line: 30, scope: ![[#]]) +// CHECK: [[LINE30_G7R3]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 7, atomRank: 
3) // CHECK: [[LINE30_G7R2]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 7, atomRank: 2) // CHECK: [[LINE30_G7R1]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 7, atomRank: 1) // CHECK: [[LINE30_G6R4]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 6, atomRank: 4) @@ -128,39 +130,39 @@ void fun() { // CHECK: [[LINE30_G6R2]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 6, atomRank: 2) // CHECK: [[LINE30_G6R1]] = !DILocation(line: 30, scope: ![[#]], atomGroup: 6, atomRank: 1) -// CHECK: [[LINE39_G8R1]] = !DILocation(line: 39, scope: ![[#]], atomGroup: 8, atomRank: 1) - -// CHECK: [[LINE42_G10R1]] = !DILocation(line: 42, scope: ![[#]], atomGroup: 10, atomRank: 1) -// CHECK: [[LINE42]] = !DILocation(line: 42, scope: ![[#]]) -// CHECK: [[LINE42_G10R2]] = !DILocation(line: 42, scope: ![[#]], atomGroup: 10, atomRank: 2) -// CHECK: [[LINE42_G9R2]] = !DILocation(line: 42, scope: ![[#]], atomGroup: 9, atomRank: 2) -// CHECK: [[LINE42_G9R1]] = !DILocation(line: 42, scope: ![[#]], atomGroup: 9, atomRank: 1) - -// CHECK: [[LINE50]] = !DILocation(line: 50, scope: ![[#]]) -// CHECK: [[LINE50_G12R2]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 12, atomRank: 2) -// CHECK: [[LINE50_G12R3]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 12, atomRank: 3) -// CHECK: [[LINE50_G12R1]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 12, atomRank: 1) -// CHECK: [[LINE50_G11R4]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 11, atomRank: 4) -// CHECK: [[LINE50_G11R3]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 11, atomRank: 3) -// CHECK: [[LINE50_G11R2]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 11, atomRank: 2) -// CHECK: [[LINE50_G11R1]] = !DILocation(line: 50, scope: ![[#]], atomGroup: 11, atomRank: 1) - -// CHECK: [[LINE68_G14R1]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 14, atomRank: 1) -// CHECK: [[LINE68]] = !DILocation(line: 68, scope: ![[#]]) -// CHECK: [[LINE68_G14R2]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 14, 
atomRank: 2) -// CHECK: [[LINE68_G14R3]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 14, atomRank: 3) -// CHECK: [[LINE68_G13R4]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 13, atomRank: 4) -// CHECK: [[LINE68_G13R3]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 13, atomRank: 3) -// CHECK: [[LINE68_G13R2]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 13, atomRank: 2) -// CHECK: [[LINE68_G13R1]] = !DILocation(line: 68, scope: ![[#]], atomGroup: 13, atomRank: 1) - -// CHECK: [[LINE87_G16R1]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 16, atomRank: 1) -// CHECK: [[LINE87]] = !DILocation(line: 87, scope: ![[#]]) -// CHECK: [[LINE87_G16R2]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 16, atomRank: 2) -// CHECK: [[LINE87_G16R3]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 16, atomRank: 3) -// CHECK: [[LINE87_G15R4]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 15, atomRank: 4) -// CHECK: [[LINE87_G15R3]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 15, atomRank: 3) -// CHECK: [[LINE87_G15R2]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 15, atomRank: 2) -// CHECK: [[LINE87_G15R1]] = !DILocation(line: 87, scope: ![[#]], atomGroup: 15, atomRank: 1) +// CHECK: [[LINE40_G8R1]] = !DILocation(line: 40, scope: ![[#]], atomGroup: 8, atomRank: 1) + +// CHECK: [[LINE43_G10R1]] = !DILocation(line: 43, scope: ![[#]], atomGroup: 10, atomRank: 1) +// CHECK: [[LINE43]] = !DILocation(line: 43, scope: ![[#]]) +// CHECK: [[LINE43_G10R2]] = !DILocation(line: 43, scope: ![[#]], atomGroup: 10, atomRank: 2) +// CHECK: [[LINE43_G9R2]] = !DILocation(line: 43, scope: ![[#]], atomGroup: 9, atomRank: 2) +// CHECK: [[LINE43_G9R1]] = !DILocation(line: 43, scope: ![[#]], atomGroup: 9, atomRank: 1) + +// CHECK: [[LINE51]] = !DILocation(line: 51, scope: ![[#]]) +// CHECK: [[LINE51_G12R2]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 12, atomRank: 2) +// CHECK: [[LINE51_G12R3]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 12, atomRank: 
3) +// CHECK: [[LINE51_G12R1]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 12, atomRank: 1) +// CHECK: [[LINE51_G11R4]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 11, atomRank: 4) +// CHECK: [[LINE51_G11R3]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 11, atomRank: 3) +// CHECK: [[LINE51_G11R2]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 11, atomRank: 2) +// CHECK: [[LINE51_G11R1]] = !DILocation(line: 51, scope: ![[#]], atomGroup: 11, atomRank: 1) + +// CHECK: [[LINE69_G14R1]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 14, atomRank: 1) +// CHECK: [[LINE69]] = !DILocation(line: 69, scope: ![[#]]) +// CHECK: [[LINE69_G14R2]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 14, atomRank: 2) +// CHECK: [[LINE69_G14R3]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 14, atomRank: 3) +// CHECK: [[LINE69_G13R4]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 13, atomRank: 4) +// CHECK: [[LINE69_G13R3]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 13, atomRank: 3) +// CHECK: [[LINE69_G13R2]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 13, atomRank: 2) +// CHECK: [[LINE69_G13R1]] = !DILocation(line: 69, scope: ![[#]], atomGroup: 13, atomRank: 1) + +// CHECK: [[LINE88_G16R1]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 16, atomRank: 1) +// CHECK: [[LINE88]] = !DILocation(line: 88, scope: ![[#]]) +// CHECK: [[LINE88_G16R2]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 16, atomRank: 2) +// CHECK: [[LINE88_G16R3]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 16, atomRank: 3) +// CHECK: [[LINE88_G15R4]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 15, atomRank: 4) +// CHECK: [[LINE88_G15R3]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 15, atomRank: 3) +// CHECK: [[LINE88_G15R2]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 15, atomRank: 2) +// CHECK: [[LINE88_G15R1]] = !DILocation(line: 88, scope: ![[#]], atomGroup: 15, atomRank: 1) // CHECK: [[RET]] = !DILocation({{.*}}, atomGroup: 17, atomRank: 
1) diff --git a/clang/test/Driver/Ofast.c b/clang/test/Driver/Ofast.c index 612478cc89558..e04ce036638f9 100644 --- a/clang/test/Driver/Ofast.c +++ b/clang/test/Driver/Ofast.c @@ -2,7 +2,7 @@ // RUN: %clang -c -O2 -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s // RUN: %clang -c -fno-fast-math -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s // RUN: %clang -c -fno-strict-aliasing -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s -// RUN: %clang -c -fno-vectorize -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST %s +// RUN: %clang -c -fno-vectorize -Ofast -### %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST-NO-VECTORIZE %s // RUN: %clang -c -Ofast -O2 -### -Werror %s 2>&1 | FileCheck -check-prefix=CHECK-OFAST-O2 \ // RUN: %if target={{.*-windows-msvc.*}} %{ --check-prefix=CHECK-OFAST-O2-ALIASING-MSVC %} \ // RUN: %else %{ --check-prefix=CHECK-OFAST-O2-ALIASING %} %s diff --git a/clang/test/Driver/clang_f_opts.c b/clang/test/Driver/clang_f_opts.c index bdeb747aa66a3..94b983f14e3ef 100644 --- a/clang/test/Driver/clang_f_opts.c +++ b/clang/test/Driver/clang_f_opts.c @@ -156,7 +156,7 @@ // RUN: %clang -### -S -O2 %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s // RUN: %clang -### -S -Os %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s // RUN: %clang -### -S -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s -// RUN: %clang -### -S -fno-vectorize -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s +// RUN: %clang -### -S -fno-vectorize -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-NO-VECTORIZE %s // RUN: %clang -### -S -O1 -fvectorize %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s // RUN: %clang -### -S -Ofast %s 2>&1 | FileCheck -check-prefix=CHECK-VECTORIZE %s // RUN: %clang -### -S %s 2>&1 | FileCheck -check-prefix=CHECK-NO-VECTORIZE %s @@ -179,7 +179,7 @@ // RUN: %clang -### -S -Os %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s // RUN: %clang -### -S -Oz %s 2>&1 | FileCheck 
-check-prefix=CHECK-SLP-VECTORIZE %s // RUN: %clang -### -S -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s -// RUN: %clang -### -S -fno-slp-vectorize -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s +// RUN: %clang -### -S -fno-slp-vectorize -O3 %s 2>&1 | FileCheck -check-prefix=CHECK-NO-SLP-VECTORIZE %s // RUN: %clang -### -S -O1 -fslp-vectorize %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s // RUN: %clang -### -S -Ofast %s 2>&1 | FileCheck -check-prefix=CHECK-SLP-VECTORIZE %s // RUN: %clang -### -S %s 2>&1 | FileCheck -check-prefix=CHECK-NO-SLP-VECTORIZE %s diff --git a/clang/test/Driver/linker-wrapper-image.c b/clang/test/Driver/linker-wrapper-image.c index c0de56d58196a..31476173cd370 100644 --- a/clang/test/Driver/linker-wrapper-image.c +++ b/clang/test/Driver/linker-wrapper-image.c @@ -1,6 +1,7 @@ // REQUIRES: x86-registered-target // REQUIRES: nvptx-registered-target // REQUIRES: amdgpu-registered-target +// REQUIRES: spirv-registered-target // RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.elf.o @@ -263,3 +264,36 @@ // HIP: while.end: // HIP-NEXT: ret void // HIP-NEXT: } + +// RUN: clang-offload-packager -o %t.out --image=file=%t.elf.o,kind=sycl,triple=spirv64-unknown-unknown,arch=generic +// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o \ +// RUN: -fembed-offload-object=%t.out +// RUN: clang-linker-wrapper --print-wrapped-module --dry-run --host-triple=x86_64-unknown-linux-gnu \ +// RUN: --linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefixes=SYCL +// RUN: clang-linker-wrapper --print-wrapped-module --dry-run --host-triple=x86_64-unknown-linux-gnu -r \ +// RUN: --linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefixes=SYCL + +// SYCL: %__sycl.tgt_device_image = type { i16, i8, i8, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr } +// SYCL-NEXT: %__sycl.tgt_bin_desc = type { i16, i16, ptr, ptr, ptr } + +// SYCL: @.sycl_offloading.target.0 = 
internal unnamed_addr constant [1 x i8] zeroinitializer +// SYCL-NEXT: @.sycl_offloading.opts.compile.0 = internal unnamed_addr constant [1 x i8] zeroinitializer +// SYCL-NEXT: @.sycl_offloading.opts.link.0 = internal unnamed_addr constant [1 x i8] zeroinitializer +// SYCL-NEXT: @.sycl_offloading.0.data = internal unnamed_addr constant [0 x i8] zeroinitializer, section ".llvm.offloading" +// SYCL-NEXT: @.offloading.entry_name = internal unnamed_addr constant [5 x i8] c"stub\00", section ".llvm.rodata.offloading", align 1 +// SYCL-NEXT: @.offloading.entry.stub = weak constant %struct.__tgt_offload_entry { i64 0, i16 1, i16 8, i32 0, ptr null, ptr @.offloading.entry_name, i64 0, i64 0, ptr null }, section "llvm_offload_entries", align 8 +// SYCL-NEXT: @.sycl_offloading.entries_arr = internal constant [1 x %struct.__tgt_offload_entry] [%struct.__tgt_offload_entry { i64 0, i16 1, i16 8, i32 0, ptr null, ptr @.offloading.entry_name, i64 0, i64 0, ptr null }] +// SYCL-NEXT: @.sycl_offloading.device_images = internal unnamed_addr constant [1 x %__sycl.tgt_device_image] [%__sycl.tgt_device_image { i16 3, i8 8, i8 0, ptr @.sycl_offloading.target.0, ptr @.sycl_offloading.opts.compile.0, ptr @.sycl_offloading.opts.link.0, ptr @.sycl_offloading.0.data, ptr @.sycl_offloading.0.data, ptr @.sycl_offloading.entries_arr, ptr getelementptr ([1 x %struct.__tgt_offload_entry], ptr @.sycl_offloading.entries_arr, i64 0, i64 1), ptr null, ptr null }] +// SYCL-NEXT: @.sycl_offloading.descriptor = internal constant %__sycl.tgt_bin_desc { i16 1, i16 1, ptr @.sycl_offloading.device_images, ptr null, ptr null } + +// SYCL: define internal void @sycl.descriptor_reg() section ".text.startup" { +// SYCL-NEXT: entry: +// SYCL-NEXT: call void @__sycl_register_lib(ptr @.sycl_offloading.descriptor) +// SYCL-NEXT: ret void +// SYCL-NEXT: } + +// SYCL: define internal void @sycl.descriptor_unreg() section ".text.startup" { +// SYCL-NEXT: entry: +// SYCL-NEXT: call void @__sycl_unregister_lib(ptr 
@.sycl_offloading.descriptor) +// SYCL-NEXT: ret void +// SYCL-NEXT: } diff --git a/clang/test/Driver/linker-wrapper.c b/clang/test/Driver/linker-wrapper.c index e73fa5ca3dbf9..c060dae7bb154 100644 --- a/clang/test/Driver/linker-wrapper.c +++ b/clang/test/Driver/linker-wrapper.c @@ -54,7 +54,7 @@ __attribute__((visibility("protected"), used)) int x; // RUN: clang-offload-packager -o %t.out \ // RUN: --image=file=%t.spirv.bc,kind=sycl,triple=spirv64-unknown-unknown,arch=generic // RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o -fembed-offload-object=%t.out -// RUN: not clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \ +// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \ // RUN: --linker-path=/usr/bin/ld %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=SPIRV-LINK // SPIRV-LINK: clang{{.*}} -o {{.*}}.img -dumpdir a.out.spirv64..img. --target=spirv64-unknown-unknown {{.*}}.o --sycl-link -Xlinker -triple=spirv64-unknown-unknown -Xlinker -arch= diff --git a/clang/test/Lexer/cxx-features.cpp b/clang/test/Lexer/cxx-features.cpp index ced5bcaf0db16..8eb9ea032879c 100644 --- a/clang/test/Lexer/cxx-features.cpp +++ b/clang/test/Lexer/cxx-features.cpp @@ -148,7 +148,7 @@ // init_captures checked below -#if check(modules, 0, 0, 0, 0, 0, 0, 0) +#if check(modules, 0, 0, 0, 0, 1, 1, 1) // FIXME: 201907 in C++20 #error "wrong value for __cpp_modules" #endif diff --git a/clang/test/OpenMP/amdgcn_parallel_num_threads_strict_messages.cpp b/clang/test/OpenMP/amdgcn_parallel_num_threads_strict_messages.cpp new file mode 100644 index 0000000000000..513754b0bbad9 --- /dev/null +++ b/clang/test/OpenMP/amdgcn_parallel_num_threads_strict_messages.cpp @@ -0,0 +1,108 @@ +// RUN: %clang_cc1 -DF1 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF1 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple 
amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null +// RUN: %clang_cc1 -DF2 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF2 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null +// RUN: %clang_cc1 -DF3 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF3 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null + +#ifndef TARGET +// expected-no-diagnostics +#endif + +#ifdef F3 +template +tx ftemplate(int n) { + tx a = 0; + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: tx(20)) severity(fatal) message("msg") + { + } + + short b = 1; +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: b) severity(warning) message("msg") + { + a += b; + } + + return a; +} +#endif + +#ifdef F2 +static +int fstatic(int n) { + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp target parallel num_threads(strict: n) message("msg") + { + } + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; 
modifier ignored}} +#endif + #pragma omp target parallel num_threads(strict: 32+n) severity(warning) + { + } + + return n+1; +} +#endif + +#ifdef F1 +struct S1 { + double a; + + int r1(int n){ + int b = 1; + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: n-b) severity(warning) message("msg") + { + this->a = (double)b + 1.5; + } + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: 1024) severity(fatal) + { + this->a = 2.5; + } + + return (int)a; + } +}; +#endif + +int bar(int n){ + int a = 0; + +#ifdef F1 + #pragma omp target + { + S1 S; + a += S.r1(n); + } +#endif + +#ifdef F2 + a += fstatic(n); +#endif + +#ifdef F3 + #pragma omp target + a += ftemplate(n); +#endif + + return a; +} diff --git a/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp b/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp index 04dd9c0e9c69f..bf979d79fc61b 100644 --- a/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp +++ b/clang/test/OpenMP/distribute_parallel_for_num_threads_codegen.cpp @@ -7,14 +7,6 @@ // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" -// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | 
FileCheck %s --check-prefix=CHECK3 -// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3 - -// RUN: %clang_cc1 -DOMP60 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" -// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" - // RUN: %clang_cc1 -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK5 // RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5 @@ -31,14 +23,6 @@ // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple 
aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" -// RUN: %clang_cc1 -DOMP60 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK11 -// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -DOMP60 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11 - -// RUN: %clang_cc1 -DOMP60 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" -// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -DOMP60 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" - // RUN: %clang_cc1 -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK13 // 
RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK13 @@ -67,20 +51,12 @@ template int tmain() { #pragma omp target #pragma omp teams -#ifdef OMP60 -#pragma omp distribute parallel for num_threads(strict: C) severity(warning) message("msg") -#else #pragma omp distribute parallel for num_threads(C) -#endif for (int i = 0; i < 100; i++) foo(); #pragma omp target #pragma omp teams -#ifdef OMP60 -#pragma omp distribute parallel for num_threads(T(23)) severity(fatal) message("msg1") -#else #pragma omp distribute parallel for num_threads(T(23)) -#endif for (int i = 0; i < 100; i++) foo(); return 0; @@ -91,22 +67,14 @@ int main() { char a = s; #pragma omp target #pragma omp teams -#ifdef OMP60 -#pragma omp distribute parallel for num_threads(2) severity(warning) message("msg2") -#else #pragma omp distribute parallel for num_threads(2) -#endif for (int i = 0; i < 100; i++) { foo(); } #pragma omp target #pragma omp teams -#ifdef OMP60 -#pragma omp distribute parallel for num_threads(a) severity(fatal) message("msg3") -#else #pragma omp distribute parallel for num_threads(a) -#endif for (int i = 0; i < 100; i++) { foo(); } @@ -125,6264 +93,6156 @@ int main() { #endif -// CHECK1-LABEL: define {{[^@]+}}@main -// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 
-// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: 
store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: lpad: -// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: cleanup -// 
CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK1-NEXT: br label [[EH_RESUME:%.*]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP25]], align 4 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 -// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 -// CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK1-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont5: -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK1-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont7: -// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: ret i32 [[TMP41]] -// CHECK1: eh.resume: -// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// 
CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_Z8mayThrowv() -// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK1-NEXT: ret i8 [[CONV]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 -// CHECK1-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// 
CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 
-// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 -// CHECK1-SAME: (i64 noundef 
[[A:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 -// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// 
CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: 
[[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], 
align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: 
-// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK1-SAME: () #[[ATTR6]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: 
[[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: 
-// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr 
@[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// 
CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: 
cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// 
CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: 
cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 
-// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label 
[[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK1: invoke.cont2: -// CHECK1-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 -// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: call 
void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: 
[[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// 
CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK1-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@main -// CHECK3-SAME: () #[[ATTR0:[0-9]+]] 
personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP14:%.*]] = 
icmp ne i32 [[TMP13]], 0 -// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK3: omp_offload.failed: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK3: lpad: -// CHECK3-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: cleanup -// CHECK3-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK3-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK3-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK3-NEXT: br label [[EH_RESUME:%.*]] -// CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK3-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr 
inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 -// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP31]], align 8 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP33]], align 8 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP34]], align 8 -// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 -// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// 
CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 -// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK3-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK3-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 -// CHECK3-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK3: omp_offload.failed3: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK3: omp_offload.cont4: -// CHECK3-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK3-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont5: -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK3-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont7: -// CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK3-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: ret i32 [[TMP41]] -// CHECK3: eh.resume: -// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK3-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 
[[SEL]], 1 -// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: call void @_Z8mayThrowv() -// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK3-NEXT: ret i8 [[CONV]] -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 -// CHECK3-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// 
CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 
-// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 -// CHECK3-SAME: (i64 noundef 
[[A:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK3-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 -// CHECK3-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// 
CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK3-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK3-NEXT: 
[[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], 
align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK3: omp_offload.failed: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK3: omp_offload.failed3: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK3: omp_offload.cont4: 
-// CHECK3-NEXT: ret i32 0 -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK3-SAME: () #[[ATTR6]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK3: omp_offload.failed: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK3-NEXT: 
[[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK3: omp_offload.failed3: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] -// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK3: omp_offload.cont4: -// CHECK3-NEXT: ret i32 0 -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 -// CHECK3-SAME: () #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: call void (ptr, 
i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] 
-// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 1, ptr @.str) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// 
CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 -// CHECK3-SAME: () #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: 
cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// 
CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 -// CHECK3-SAME: () #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: 
cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 1, ptr @.str) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 
-// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 -// CHECK3-SAME: () #[[ATTR2]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label 
[[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK3: invoke.cont2: -// CHECK3-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 -// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK3-NEXT: call 
void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined -// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: 
[[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK3: cond.true: -// CHECK3-NEXT: br label [[COND_END:%.*]] -// CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: br label [[COND_END]] -// CHECK3: cond.end: -// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// 
CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK3: omp.loop.exit: -// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK3-NEXT: ret void -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK3-SAME: (ptr noundef nonnull 
align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@main -// CHECK5-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK5-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK5-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK5-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK5-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// 
CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK5: omp_offload.failed: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK5: lpad: -// CHECK5-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: cleanup -// CHECK5-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK5-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK5-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK5-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK5-NEXT: br label [[EH_RESUME:%.*]] -// CHECK5: omp_offload.cont: -// CHECK5-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK5-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = 
getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP25]], align 4 -// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK5-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 -// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK5-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 -// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 -// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP31]], align 8 -// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP33]], align 8 -// CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], 
i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP34]], align 8 -// CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 -// CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 -// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK5-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 -// CHECK5-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK5: omp_offload.failed3: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK5: omp_offload.cont4: -// CHECK5-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 -// CHECK5-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK5-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() -// CHECK5-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK5: invoke.cont5: -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK5-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() -// CHECK5-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK5: invoke.cont7: -// CHECK5-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK5-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK5-NEXT: 
[[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK5-NEXT: ret i32 [[TMP41]] -// CHECK5: eh.resume: -// CHECK5-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK5-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK5-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK5-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK5-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK5-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: call void @_Z8mayThrowv() -// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK5-NEXT: ret i8 [[CONV]] -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 -// 
CHECK5-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label 
[[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 
-// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] -// CHECK5-NEXT: unreachable -// -// -// CHECK5-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK5-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK5-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK5-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK5-NEXT: unreachable -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 -// CHECK5-SAME: (i64 noundef 
[[A:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK5-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 -// CHECK5-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// 
CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable -// -// -// CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK5-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK5-NEXT: 
[[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], 
align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK5: omp_offload.failed: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK5: omp_offload.cont: -// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK5-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK5: omp_offload.failed3: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK5: omp_offload.cont4: 
-// CHECK5-NEXT: ret i32 0 -// -// -// CHECK5-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK5-SAME: () #[[ATTR6]] comdat { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK5: omp_offload.failed: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK5: omp_offload.cont: -// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK5-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK5-NEXT: 
[[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK5-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK5-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK5: omp_offload.failed3: -// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] -// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK5: omp_offload.cont4: -// CHECK5-NEXT: ret i32 0 -// -// -// CHECK5-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 -// CHECK5-SAME: () #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: call void (ptr, 
i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] 
-// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// 
CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 -// CHECK5-SAME: () #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: 
cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// 
CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 -// CHECK5-SAME: () #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) -// CHECK5-NEXT: ret void -// -// -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: 
cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@main +// CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1 +// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 +// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK3-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) +// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr 
noundef nonnull align 8 dereferenceable(24) [[S]]) +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1 +// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK3-NEXT: store i64 
0, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3: omp_offload.failed: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK3: lpad: +// CHECK3-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: cleanup +// CHECK3-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 +// CHECK3-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 +// CHECK3-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 +// CHECK3-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK3-NEXT: br label [[EH_RESUME:%.*]] +// CHECK3: omp_offload.cont: +// CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 +// CHECK3-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 +// CHECK3-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, 
i32 0 +// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 +// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 +// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 +// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 +// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP31]], align 8 +// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds 
nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 8 +// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP33]], align 8 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP34]], align 8 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 +// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP37]], align 4 +// CHECK3-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK3-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK3-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK3: omp_offload.failed3: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK3: omp_offload.cont4: +// CHECK3-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 +// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 +// CHECK3-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() +// CHECK3-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] +// CHECK3: invoke.cont5: +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 
[[CALL6]] +// CHECK3-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() +// CHECK3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] +// CHECK3: invoke.cont7: +// CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] +// CHECK3-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 +// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK3-NEXT: ret i32 [[TMP41]] +// CHECK3: eh.resume: +// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 +// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 +// CHECK3-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 +// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El +// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv +// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], 
align 8 +// CHECK3-NEXT: call void @_Z8mayThrowv() +// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 +// CHECK3-NEXT: ret i8 [[CONV]] +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 +// CHECK3-SAME: () #[[ATTR2:[0-9]+]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 
+// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate +// CHECK3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { +// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] +// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 +// CHECK3-SAME: (i64 noundef [[A:%.*]]) 
#[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK3-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// 
CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv +// CHECK3-SAME: () #[[ATTR6:[0-9]+]] comdat { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = 
getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// 
CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3: omp_offload.failed: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK3: omp_offload.cont: +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK3-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 
0, i32 5 +// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK3: omp_offload.failed3: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK3: omp_offload.cont4: +// CHECK3-NEXT: ret i32 
0 +// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv +// CHECK3-SAME: () #[[ATTR6]] comdat { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// 
CHECK3-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3: omp_offload.failed: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK3: omp_offload.cont: +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK3-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr 
inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK3: omp_offload.failed3: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK3: omp_offload.cont4: +// CHECK3-NEXT: ret i32 0 +// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev +// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El +// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 +// CHECK3-SAME: () #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: 
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 1, ptr @.str) +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// 
CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 +// CHECK3-SAME: () #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: 
+// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// 
CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 +// CHECK3-SAME: () #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: 
cond.false: +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 1, ptr @.str) +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// 
CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 +// CHECK3-SAME: () #[[ATTR2]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) +// CHECK3-NEXT: ret void +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label 
[[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) +// CHECK3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] +// CHECK3: invoke.cont2: +// CHECK3-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 +// CHECK3-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) +// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK3-NEXT: call void (ptr, i32, ptr, 
...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca 
i32, align 4 +// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK3: cond.true: +// CHECK3-NEXT: br label [[COND_END:%.*]] +// CHECK3: cond.false: +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: br label [[COND_END]] +// CHECK3: cond.end: +// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK3-NEXT: store i32 [[COND]], ptr 
[[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK3: omp.inner.for.cond: +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK3: omp.inner.for.body: +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK3-NEXT: invoke void @_Z3foov() +// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK3: invoke.cont: +// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK3: omp.body.continue: +// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK3: omp.inner.for.inc: +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK3: omp.inner.for.end: +// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK3: omp.loop.exit: +// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK3-NEXT: ret void +// CHECK3: terminate.lpad: +// CHECK3-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK3-NEXT: catch ptr null +// CHECK3-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK3-NEXT: unreachable +// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev +// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) 
unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@main +// CHECK11-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK11-NEXT: [[A:%.*]] = alloca i8, align 1 +// CHECK11-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 +// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) +// CHECK11-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: store i8 [[CALL]], ptr [[A]], align 1 +// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK11-NEXT: 
[[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11: omp_offload.failed: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK11: lpad: +// CHECK11-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: cleanup +// CHECK11-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 +// CHECK11-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 +// CHECK11-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 +// CHECK11-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[EH_RESUME:%.*]] +// CHECK11: omp_offload.cont: +// CHECK11-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 +// CHECK11-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 +// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK11-NEXT: 
[[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK11-NEXT: store i32 1, ptr [[TMP26]], align 4 +// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 +// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK11-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 +// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 +// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP31]], align 8 +// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP32]], align 8 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP33]], align 8 +// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP34]], align 8 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK11-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK11-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK11: omp_offload.failed3: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK11: omp_offload.cont4: +// CHECK11-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 +// CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 +// CHECK11-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() +// CHECK11-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] +// CHECK11: invoke.cont5: +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] +// CHECK11-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() +// CHECK11-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] +// CHECK11: invoke.cont7: +// CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] +// CHECK11-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 +// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef 
nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP41]] +// CHECK11: eh.resume: +// CHECK11-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 +// CHECK11-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK11-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 +// CHECK11-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 +// CHECK11-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC1El +// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@_ZN1ScvcEv +// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: call void @_Z8mayThrowv() +// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 +// CHECK11-NEXT: ret i8 [[CONV]] +// CHECK11-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 +// CHECK11-SAME: () #[[ATTR2:[0-9]+]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 
[[TMP2]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// 
CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// 
CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@__clang_call_terminate +// CHECK11-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { +// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] +// CHECK11-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 +// CHECK11-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr 
[[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK11-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +// CHECK11-NEXT: call void 
(ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: 
omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv +// CHECK11-SAME: () #[[ATTR6:[0-9]+]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, 
align 4 +// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11: omp_offload.failed: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK11: omp_offload.cont: +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// 
CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK11-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK11-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK11: omp_offload.failed3: +// 
CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK11: omp_offload.cont4: +// CHECK11-NEXT: ret i32 0 +// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv +// CHECK11-SAME: () #[[ATTR6]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11: omp_offload.failed: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK11: omp_offload.cont: +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK11-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// 
CHECK11-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK11-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK11-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK11-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK11-NEXT: [[TMP27:%.*]] = 
getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK11-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK11-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK11: omp_offload.failed3: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK11: omp_offload.cont4: +// CHECK11-NEXT: ret i32 0 +// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD1Ev +// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC2El +// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK11-NEXT: store i64 
[[TMP0]], ptr [[A2]], align 8 +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 +// CHECK11-SAME: () #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// 
CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 1, ptr @.str) +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: 
omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 +// CHECK11-SAME: () #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label 
[[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: 
omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 +// CHECK11-SAME: () #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label 
[[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 1, ptr @.str) +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: 
omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 +// CHECK11-SAME: () #[[ATTR2]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) +// CHECK11-NEXT: ret void +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label 
[[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) +// CHECK11-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] +// CHECK11: invoke.cont2: +// CHECK11-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 +// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) +// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 
+// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: 
[[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK11: cond.true: +// CHECK11-NEXT: br label [[COND_END:%.*]] +// CHECK11: cond.false: +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: br label [[COND_END]] +// CHECK11: cond.end: +// 
CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK11: omp.inner.for.cond: +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK11: omp.inner.for.body: +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK11-NEXT: invoke void @_Z3foov() +// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK11: invoke.cont: +// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK11: omp.body.continue: +// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK11: omp.inner.for.inc: +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK11: omp.inner.for.end: +// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK11: omp.loop.exit: +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK11-NEXT: ret void +// CHECK11: terminate.lpad: +// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK11-NEXT: catch ptr null +// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) 
#[[ATTR7]] +// CHECK11-NEXT: unreachable +// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD2Ev +// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK11-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@main +// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1 +// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK1-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) +// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1 +// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// 
CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK1: lpad: +// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: cleanup +// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 +// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 +// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK1-NEXT: br label [[EH_RESUME:%.*]] +// CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 +// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], 
i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP25]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 +// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8 +// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP33]], align 8 +// 
CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 +// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP37]], align 4 +// CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK1: omp_offload.failed3: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK1: omp_offload.cont4: +// CHECK1-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 +// CHECK1-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() +// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] +// CHECK1: invoke.cont5: +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] +// CHECK1-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() +// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] +// CHECK1: invoke.cont7: +// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] +// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 +// CHECK1-NEXT: call 
void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK1-NEXT: ret i32 [[TMP41]] +// CHECK1: eh.resume: +// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 +// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 +// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 +// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] // // -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr 
[[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: call 
void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) +// CHECK1-NEXT: ret void // // -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 -// CHECK5-SAME: () #[[ATTR2]] { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) -// CHECK5-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: call void @_Z8mayThrowv() +// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 +// CHECK1-NEXT: ret i8 [[CONV]] // // -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// 
CHECK5-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: 
invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK5: invoke.cont: -// CHECK5-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK5-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK5: invoke.cont2: -// CHECK5-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 -// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) -// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68 +// CHECK1-SAME: () #[[ATTR2:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined) +// CHECK1-NEXT: ret void // // -// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined -// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: 
store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK5: cond.true: -// CHECK5-NEXT: br label [[COND_END:%.*]] -// CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: br label [[COND_END]] -// CHECK5: cond.end: -// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: invoke void @_Z3foov() -// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// 
CHECK5: invoke.cont: -// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK5: omp.body.continue: -// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK5: omp.inner.for.end: -// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK5: omp.loop.exit: -// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK5-NEXT: ret void -// CHECK5: terminate.lpad: -// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK5-NEXT: catch ptr null -// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK5-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], 
align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// 
CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// 
CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] +// CHECK1-NEXT: unreachable // // -// CHECK5-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK5-NEXT: entry: -// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr 
[[THIS_ADDR]], align 8 -// CHECK5-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate +// CHECK1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { +// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] +// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@main -// CHECK9-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK9-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK9-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK9-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr 
inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 
-// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: lpad: -// CHECK9-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: cleanup -// CHECK9-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK9-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK9-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[EH_RESUME:%.*]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK9-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = 
getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP25]], align 4 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 -// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], 
i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 -// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK9-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 -// CHECK9-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 -// CHECK9-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK9-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() -// CHECK9-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK9: invoke.cont5: -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK9-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() -// CHECK9-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK9: invoke.cont7: -// CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK9-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK9-NEXT: 
[[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP41]] -// CHECK9: eh.resume: -// CHECK9-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK9-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK9-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK9-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK9-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74 +// CHECK1-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]]) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: 
[[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], 
[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: call void @_Z8mayThrowv() -// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK9-NEXT: ret i8 [[CONV]] +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 
+// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: 
cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } 
+// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 -// CHECK9-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv +// CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 
+// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds 
nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK1-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 
+// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK1: omp_offload.failed3: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK1: omp_offload.cont4: +// CHECK1-NEXT: ret i32 0 // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], 
align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 
-// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv +// CHECK1-SAME: () #[[ATTR6]] comdat { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP3]], 
align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK1-NEXT: br i1 [[TMP14]], label 
[[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK1-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK1: omp_offload.failed3: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK1: omp_offload.cont4: +// CHECK1-NEXT: ret i32 0 // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = 
alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: 
[[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } 
[[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] -// CHECK9-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK9-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK9-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 -// CHECK9-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 
-// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr 
[[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 -// CHECK9-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr 
[[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52 +// CHECK1-SAME: () #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: 
cond.false: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: 
store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], 
align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca 
ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// 
CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void 
@__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK9-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], 
i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK9-NEXT: [[TMP16:%.*]] = 
getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x 
i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: ret i32 0 +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57 +// CHECK1-SAME: () #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: 
cond.false: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK9-SAME: () #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: 
omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 
-// CHECK9-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: ret i32 0 +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, 
align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br 
label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr 
[[TMP12]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52 +// CHECK1-SAME: () #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr 
[[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] 
to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: 
[[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load 
i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// 
CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57 +// CHECK1-SAME: () #[[ATTR2]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined) +// CHECK1-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label 
[[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) +// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] +// CHECK1: invoke.cont2: +// CHECK1-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 +// CHECK1-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) +// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK1-NEXT: call void (ptr, i32, ptr, 
...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr 
[[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] 
= zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: ret void +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: 
[[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 
+// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK1-NEXT: invoke void @_Z3foov() +// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK1: invoke.cont: +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK1-NEXT: ret void +// CHECK1: terminate.lpad: +// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK1-NEXT: catch ptr null +// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK1-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef 
[[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 
[[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@main +// CHECK5-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK5-NEXT: [[A:%.*]] = alloca i8, align 1 +// CHECK5-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 +// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK5-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) +// CHECK5-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: store i8 [[CALL]], ptr 
[[A]], align 1 +// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] 
zeroinitializer, ptr [[TMP10]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5: omp_offload.failed: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK5: lpad: +// CHECK5-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: cleanup +// CHECK5-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 +// CHECK5-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 +// CHECK5-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 +// CHECK5-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK5-NEXT: br label [[EH_RESUME:%.*]] +// CHECK5: omp_offload.cont: +// CHECK5-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 +// CHECK5-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 +// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: 
store i64 [[TMP19]], ptr [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP25]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK5-NEXT: store i32 1, ptr [[TMP26]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 +// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 +// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK5-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 +// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 +// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP31]], align 8 +// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP32]], align 8 +// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP33]], align 8 +// CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP34]], align 8 +// CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 +// CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 +// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP37]], align 4 +// CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK5-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK5-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK5: omp_offload.failed3: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK5: omp_offload.cont4: +// CHECK5-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 +// CHECK5-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 +// CHECK5-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() +// CHECK5-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] +// CHECK5: invoke.cont5: +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] +// CHECK5-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() +// CHECK5-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] +// CHECK5: invoke.cont7: +// CHECK5-NEXT: 
[[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] +// CHECK5-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 +// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK5-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK5-NEXT: ret i32 [[TMP41]] +// CHECK5: eh.resume: +// CHECK5-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 +// CHECK5-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK5-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 +// CHECK5-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 +// CHECK5-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// +// +// CHECK5-LABEL: define {{[^@]+}}@_ZN1SC1El +// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) +// CHECK5-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) -// CHECK9-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@_ZN1ScvcEv +// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: call void @_Z8mayThrowv() +// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 +// CHECK5-NEXT: ret i8 [[CONV]] // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr 
[[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68 +// CHECK5-SAME: () #[[ATTR2:[0-9]+]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined) +// CHECK5-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// 
CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], 
[[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK5-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, 
[[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 
+// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] +// CHECK5-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) -// CHECK9-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@__clang_call_terminate +// CHECK5-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { +// CHECK5-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] +// CHECK5-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, 
i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74 +// CHECK5-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]]) +// CHECK5-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// 
CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label 
[[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 
+// CHECK5-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// 
CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label 
[[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) 
#[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) -// CHECK9-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv +// CHECK5-SAME: () #[[ATTR6:[0-9]+]] comdat { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], 
ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5: omp_offload.failed: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK5: omp_offload.cont: +// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK5-NEXT: 
[[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK5-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK5-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK5-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// 
CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK5: omp_offload.failed3: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK5: omp_offload.cont4: +// CHECK5-NEXT: ret i32 0 // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// 
CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 
dereferenceable(24) [[REF_TMP]]) -// CHECK9-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK9: invoke.cont2: -// CHECK9-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 -// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv +// CHECK5-SAME: () #[[ATTR6]] comdat { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = 
alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK5-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr 
[[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK5-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5: omp_offload.failed: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK5: omp_offload.cont: +// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK5-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK5-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK5-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK5-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK5-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK5-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK5-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK5-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK5-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK5-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label 
[[OMP_OFFLOAD_CONT4:%.*]] +// CHECK5: omp_offload.failed3: +// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]] +// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK5: omp_offload.cont4: +// CHECK5-NEXT: ret i32 0 // // -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// 
CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], 
align 4 -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK9-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@_ZN1SD1Ev +// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@_ZN1SC2El +// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: 
store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52 +// CHECK5-SAME: () #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined) +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr 
[[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK5-NEXT: ret void +// +// +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// 
CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@main -// CHECK11-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK11-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK11-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[EHSELECTOR_SLOT:%.*]] = 
alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK11-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds 
nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK11: omp_offload.failed: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] -// 
CHECK11: lpad: -// CHECK11-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: cleanup -// CHECK11-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK11-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK11-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK11-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[EH_RESUME:%.*]] -// CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK11-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP25]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK11-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK11-NEXT: 
store ptr [[TMP23]], ptr [[TMP27]], align 8 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK11-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 -// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP31]], align 8 -// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP33]], align 8 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP34]], align 8 -// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 -// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP37]], align 4 -// 
CHECK11-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK11-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 -// CHECK11-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK11: omp_offload.failed3: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK11: omp_offload.cont4: -// CHECK11-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 -// CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK11-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() -// CHECK11-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK11: invoke.cont5: -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK11-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() -// CHECK11-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK11: invoke.cont7: -// CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK11-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP41]] -// CHECK11: eh.resume: -// CHECK11-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK11-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK11-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK11-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK11-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57 +// CHECK5-SAME: () #[[ATTR2]] { +// CHECK5-NEXT: entry: +// 
CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined) +// CHECK5-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK11-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr 
[[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] 
to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK5-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: call void @_Z8mayThrowv() -// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK11-NEXT: ret i8 [[CONV]] +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] 
= alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// 
CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret 
void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 -// CHECK11-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) -// CHECK11-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52 +// CHECK5-SAME: () #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined) +// CHECK5-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr 
[[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: 
[[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr 
[[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] 
to i64 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK5-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr 
[[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label 
[[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] -// CHECK11-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 
noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr 
[[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label 
[[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK11-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK11-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57 +// CHECK5-SAME: () #[[ATTR2]] { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined) +// CHECK5-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 -// CHECK11-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) -// CHECK11-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], 
label [[COND_FALSE:%.*]] +// CHECK5: cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) +// CHECK5-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] +// CHECK5: invoke.cont2: +// CHECK5-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 +// CHECK5-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) +// CHECK5-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK5-NEXT: call void (ptr, i32, 
ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } +// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// 
CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK11-NEXT: br i1 [[CMP1]], 
label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 -// CHECK11-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK11-NEXT: ret void +// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined +// CHECK5-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 
+// CHECK5-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK5: 
cond.true: +// CHECK5-NEXT: br label [[COND_END:%.*]] +// CHECK5: cond.false: +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: br label [[COND_END]] +// CHECK5: cond.end: +// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK5: omp.inner.for.cond: +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK5: omp.inner.for.body: +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK5-NEXT: invoke void @_Z3foov() +// CHECK5-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK5: invoke.cont: +// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK5: omp.body.continue: +// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK5: omp.inner.for.inc: +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK5: omp.inner.for.end: +// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK5: omp.loop.exit: +// CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK5-NEXT: ret void +// CHECK5: terminate.lpad: +// CHECK5-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } 
+// CHECK5-NEXT: catch ptr null +// CHECK5-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK5-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK5-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 
-// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// 
CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK5-LABEL: define {{[^@]+}}@_ZN1SD2Ev +// CHECK5-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { +// CHECK5-NEXT: entry: +// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK5-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@main +// CHECK9-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK9-NEXT: [[A:%.*]] = alloca i8, align 1 +// CHECK9-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: 
[[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 +// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) +// CHECK9-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: store i8 [[CALL]], ptr [[A]], align 1 +// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr null, ptr 
[[TMP5]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9: omp_offload.failed: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK9: lpad: +// CHECK9-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: cleanup +// CHECK9-NEXT: [[TMP16:%.*]] = 
extractvalue { ptr, i32 } [[TMP15]], 0 +// CHECK9-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 +// CHECK9-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[EH_RESUME:%.*]] +// CHECK9: omp_offload.cont: +// CHECK9-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 +// CHECK9-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 +// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP25]], align 4 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK9-NEXT: store i32 1, ptr [[TMP26]], align 4 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8 +// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK9-NEXT: store ptr @.offload_sizes, ptr [[TMP29]], align 8 +// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP30]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP31]], align 8 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP32]], align 8 +// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP34]], align 8 +// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP37]], align 4 +// CHECK9-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr 
[[KERNEL_ARGS2]]) +// CHECK9-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK9-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK9: omp_offload.failed3: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK9: omp_offload.cont4: +// CHECK9-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 +// CHECK9-NEXT: [[CONV:%.*]] = sext i8 [[TMP40]] to i32 +// CHECK9-NEXT: [[CALL6:%.*]] = invoke noundef i32 @_Z5tmainIcLi5EEiv() +// CHECK9-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] +// CHECK9: invoke.cont5: +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] +// CHECK9-NEXT: [[CALL8:%.*]] = invoke noundef i32 @_Z5tmainI1SLi1EEiv() +// CHECK9-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] +// CHECK9: invoke.cont7: +// CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] +// CHECK9-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 +// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP41]] +// CHECK9: eh.resume: +// CHECK9-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 +// CHECK9-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 +// CHECK9-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 +// CHECK9-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 +// CHECK9-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] +// +// +// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC1El +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: store ptr 
[[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK11-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], 
ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK11: omp_offload.failed: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP15]], align 4 
-// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK11-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK11-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK11: omp_offload.failed3: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK11: omp_offload.cont4: -// CHECK11-NEXT: ret i32 0 +// CHECK9-LABEL: define {{[^@]+}}@_ZN1ScvcEv +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: call void @_Z8mayThrowv() +// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 +// CHECK9-NEXT: ret i8 [[CONV]] // // -// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK11-SAME: () #[[ATTR6]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK11-NEXT: 
[[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] 
zeroinitializer, ptr [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK11: omp_offload.failed: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK11-NEXT: store i32 3, ptr [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr 
inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK11-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 8 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK11-NEXT: store i64 100, ptr [[TMP23]], align 8 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK11-NEXT: store i64 0, ptr [[TMP24]], align 8 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK11-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK11-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK11-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK11: omp_offload.failed3: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] -// 
CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK11: omp_offload.cont4: -// CHECK11-NEXT: ret i32 0 +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68 +// CHECK9-SAME: () #[[ATTR2:[0-9]+]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// 
CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 2) +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7:[0-9]+]] +// CHECK9-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: call void @_ZN1SD2Ev(ptr 
noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@__clang_call_terminate +// CHECK9-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { +// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] +// CHECK9-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] +// CHECK9-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74 +// CHECK9-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]]) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META17:![0-9]+]] +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP3:%.*]] = load 
i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK9-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]]) +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// 
CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable +// +// +// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv +// CHECK9-SAME: () #[[ATTR6:[0-9]+]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK9-NEXT: 
[[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], 
align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9: omp_offload.failed: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK9: omp_offload.cont: +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK9-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK9: omp_offload.failed3: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK9: omp_offload.cont4: 
+// CHECK9-NEXT: ret i32 0 // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 -// CHECK11-SAME: () #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv +// CHECK9-SAME: () #[[ATTR6]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 +// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9: omp_offload.failed: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK9: omp_offload.cont: +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 +// CHECK9-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK9-NEXT: 
[[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP16]], align 4 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 +// CHECK9-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 +// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 +// CHECK9-NEXT: store i64 100, ptr [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 +// CHECK9-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 +// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP25]], align 4 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 +// 
CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 +// CHECK9-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK9-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK9-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] +// CHECK9: omp_offload.failed3: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] +// CHECK9: omp_offload.cont4: +// CHECK9-NEXT: ret i32 0 // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr 
[[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 1, ptr @.str) -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 
-// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD1Ev +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca 
i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: 
br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { 
ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC2El +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 -// CHECK11-SAME: () #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD2Ev +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 
[[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52 +// CHECK9-SAME: () #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: 
[[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// 
CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) 
#[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label 
[[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 5) +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// 
CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 -// CHECK11-SAME: () #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57 +// CHECK9-SAME: () #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr 
[[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 1, ptr @.str) -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 
+// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 23) +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: 
omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] 
personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr 
[[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: 
omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 -// CHECK11-SAME: () #[[ATTR2]] { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52 +// CHECK9-SAME: () #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined -// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK11-NEXT: br i1 [[CMP]], 
label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK11-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK11: invoke.cont2: -// CHECK11-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 -// CHECK11-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) -// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] 
to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr 
[[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 1) +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, 
ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// 
CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], 
align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable // // -// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined -// 
CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 
8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK11: cond.true: -// CHECK11-NEXT: br label [[COND_END:%.*]] -// CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: br label [[COND_END]] -// CHECK11: cond.end: -// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: 
[[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK11: omp.loop.exit: -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK11-NEXT: ret void -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] -// CHECK11-NEXT: unreachable +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57 +// CHECK9-SAME: () #[[ATTR2]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined) +// CHECK9-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK11-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: ret void +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// 
CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// 
CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] +// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: [[CALL:%.*]] = invoke noundef i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) +// CHECK9-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]] +// CHECK9: invoke.cont2: +// CHECK9-NEXT: [[TMP7:%.*]] = sext i8 [[CALL]] to i32 +// CHECK9-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP7]]) +// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP14:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP15:%.*]] = extractvalue { ptr, i32 } [[TMP14]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP15]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = 
alloca i32, align 4 +// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 +// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 +// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9: cond.true: +// CHECK9-NEXT: br label [[COND_END:%.*]] +// CHECK9: cond.false: +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: br label [[COND_END]] +// CHECK9: cond.end: +// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] +// CHECK9-NEXT: store i32 
[[COND]], ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK9: omp.inner.for.cond: +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9: omp.inner.for.body: +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +// CHECK9-NEXT: invoke void @_Z3foov() +// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] +// CHECK9: invoke.cont: +// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK9: omp.body.continue: +// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK9: omp.inner.for.inc: +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK9: omp.inner.for.end: +// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK9: omp.loop.exit: +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) +// CHECK9-NEXT: ret void +// CHECK9: terminate.lpad: +// CHECK9-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } +// CHECK9-NEXT: catch ptr null +// CHECK9-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 +// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR7]] +// CHECK9-NEXT: unreachable // // // CHECK13-LABEL: define {{[^@]+}}@main @@ -6433,11 +6293,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] 
zeroinitializer, ptr [[TMP11]], align 4 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.region_id, ptr [[KERNEL_ARGS]]) +// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.region_id, ptr [[KERNEL_ARGS]]) // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92() #[[ATTR3:[0-9]+]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68() #[[ATTR3:[0-9]+]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK13: lpad: // CHECK13-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } @@ -6486,11 +6346,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4 // CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP37]], align 4 -// CHECK13-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK13-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.region_id, ptr [[KERNEL_ARGS2]]) // CHECK13-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 // CHECK13-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] // CHECK13: omp_offload.failed3: -// 
CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP19]]) #[[ATTR3]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74(i64 [[TMP19]]) #[[ATTR3]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]] // CHECK13: omp_offload.cont4: // CHECK13-NEXT: [[TMP40:%.*]] = load i8, ptr [[A]], align 1 @@ -6541,14 +6401,14 @@ int main() { // CHECK13-NEXT: ret i8 [[CONV]] // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68 // CHECK13-SAME: () #[[ATTR2:[0-9]+]] { // CHECK13-NEXT: entry: -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -6594,7 +6454,7 @@ int main() { // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -6609,7 +6469,7 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l68.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -6695,16 +6555,16 @@ int main() { // CHECK13-NEXT: unreachable // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74 // CHECK13-SAME: (i64 noundef [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK13-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined, ptr [[A_ADDR]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined, ptr [[A_ADDR]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -6755,7 +6615,7 @@ int main() { // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -6770,7 +6630,7 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l74.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -6882,11 +6742,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68() #[[ATTR3]] +// CHECK13-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52() #[[ATTR3]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK13: omp_offload.cont: // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 @@ -6915,11 +6775,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) // CHECK13-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 // CHECK13-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] // CHECK13: omp_offload.failed3: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77() #[[ATTR3]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57() #[[ATTR3]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]] // CHECK13: omp_offload.cont4: // CHECK13-NEXT: ret i32 0 @@ -6958,11 +6818,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 // CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.region_id, ptr [[KERNEL_ARGS]]) +// 
CHECK13-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.region_id, ptr [[KERNEL_ARGS]]) // CHECK13-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK13-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68() #[[ATTR3]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52() #[[ATTR3]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK13: omp_offload.cont: // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 @@ -6991,11 +6851,11 @@ int main() { // CHECK13-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP26]], align 4 // CHECK13-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 // CHECK13-NEXT: store i32 0, ptr [[TMP27]], align 4 -// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.region_id, ptr [[KERNEL_ARGS2]]) +// CHECK13-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.region_id, ptr [[KERNEL_ARGS2]]) // CHECK13-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 // CHECK13-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] // CHECK13: omp_offload.failed3: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77() #[[ATTR3]] +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57() #[[ATTR3]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT4]] // CHECK13: omp_offload.cont4: // 
CHECK13-NEXT: ret i32 0 @@ -7025,14 +6885,14 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52 // CHECK13-SAME: () #[[ATTR2]] { // CHECK13-NEXT: entry: -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7078,7 +6938,7 @@ int main() { // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -7093,7 +6953,7 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l68.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l52.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7172,14 +7032,14 @@ int main() { // CHECK13-NEXT: unreachable // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57 // CHECK13-SAME: () #[[ATTR2]] { // CHECK13-NEXT: entry: -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7225,7 +7085,7 @@ int main() { // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -7240,7 +7100,7 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l77.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l57.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7319,14 +7179,14 @@ int main() { // CHECK13-NEXT: unreachable // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52 // CHECK13-SAME: () #[[ATTR2]] { // CHECK13-NEXT: entry: -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7372,7 +7232,7 @@ int main() { // CHECK13-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -7387,7 +7247,7 @@ int main() { // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l68.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l52.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7466,14 +7326,14 @@ int main() { // CHECK13-NEXT: unreachable // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77 +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57 // CHECK13-SAME: () #[[ATTR2]] { // CHECK13-NEXT: entry: -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 @@ -7528,7 +7388,7 @@ int main() { // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) +// CHECK13-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 @@ -7549,7 +7409,7 @@ int main() { // CHECK13-NEXT: unreachable // // -// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l77.omp_outlined.omp_outlined +// CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l57.omp_outlined.omp_outlined // CHECK13-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 diff --git a/clang/test/OpenMP/distribute_parallel_for_simd_num_threads_strict_codegen.cpp b/clang/test/OpenMP/distribute_parallel_for_simd_num_threads_strict_codegen.cpp deleted file mode 100644 index 7c4e995890921..0000000000000 --- a/clang/test/OpenMP/distribute_parallel_for_simd_num_threads_strict_codegen.cpp +++ /dev/null @@ -1,3541 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ -// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1 -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple 
x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1 - -// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3 -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3 - -// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK9 -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9 - -// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp-simd -fopenmp-version=60 
-fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK11 -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -no-enable-noundef-analysis -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK11 - -// expected-no-diagnostics -#ifndef HEADER -#define HEADER - -typedef __INTPTR_TYPE__ intptr_t; - - -void foo(); - -struct S { - intptr_t a, b, c; - S(intptr_t a) : a(a) {} - operator char() { extern void mayThrow(); mayThrow(); return a; } - ~S() {} -}; - -template -int tmain() { - char str[] = "msg1"; -#pragma omp target -#pragma omp teams -#pragma omp distribute parallel for simd num_threads(strict: C) severity(fatal) message("msg") - for (int i = 0; i < 100; i++) - foo(); -#pragma omp target -#pragma omp teams -#pragma omp distribute parallel for simd num_threads(strict: T(23)) severity(warning) message(str) - for (int i = 0; i < 100; i++) - foo(); - return 0; -} - -int main() { - S s(0); - char a = s; - const char *str = "msg1"; -#pragma omp target -#pragma omp teams -#pragma omp distribute parallel for simd num_threads(strict: 2) severity(warning) message("msg") - for (int i = 0; i < 100; i++) { - foo(); - } -#pragma omp target -#pragma omp teams - -#pragma omp distribute parallel for simd num_threads(strict: a) severity(fatal) message(str) - for (int i = 0; i < 100; i++) { - foo(); - } - return a + tmain() + tmain(); -} - -#endif -// CHECK1-LABEL: define {{[^@]+}}@main -// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: 
[[RETVAL:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0) -// CHECK1-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]]) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK1-NEXT: store ptr @.str, ptr [[STR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 
0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 
0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54() #[[ATTR3:[0-9]+]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: lpad: -// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: cleanup -// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK1-NEXT: br label [[EH_RESUME:%.*]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[STR]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr 
[[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP29]], align 4 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 2, ptr [[TMP30]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP28]], ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP36]], align 8 -// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr 
[[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP39]], align 4 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP40]], align 4 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP41]], align 4 -// CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 -// CHECK1-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60(i64 [[TMP19]], ptr [[TMP20]]) #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: [[TMP44:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP44]] to i32 -// CHECK1-NEXT: [[CALL6:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont5: -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK1-NEXT: [[CALL8:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont7: -// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 
-// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: ret i32 [[TMP45]] -// CHECK1: eh.resume: -// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_Z8mayThrowv() -// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK1-NEXT: ret i8 [[CONV]] -// -// -// CHECK1-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54 -// CHECK1-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 
[[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 2, i32 1, ptr @.str.1), !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// 
CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP19]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void 
@__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8:[0-9]+]], !llvm.access.group [[ACC_GRP19]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK1-SAME: (ptr [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60 -// CHECK1-SAME: (i64 [[A:%.*]], ptr [[STR:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined, ptr [[A_ADDR]], ptr [[STR_ADDR]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(1) [[A:%.*]], ptr nonnull align 8 dereferenceable(8) [[STR:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META24:![0-9]+]] -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]], !align [[META25:![0-9]+]] -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: 
[[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP0]], align 1, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP10:%.*]] = sext i8 [[TMP9]] to i32 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP3]], i32 [[TMP10]], i32 2, ptr [[TMP11]]), !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group 
[[ACC_GRP26]] -// CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined, i64 [[TMP13]], i64 [[TMP15]]), !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK1-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] 
= alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 
4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP29]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK1-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK1-NEXT: 
[[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.2, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42(ptr [[STR]]) #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK1-SAME: () #[[ATTR6]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// 
CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37() #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr 
[[TMP19]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.4, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: 
[[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42(ptr [[STR]]) #[[ATTR3]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// 
CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = 
alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] 
= load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP35]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call 
void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42 -// CHECK1-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load 
i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 -// CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = 
alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] 
= load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP41]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call 
void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37 -// CHECK1-SAME: () #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// 
CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = 
alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] 
= load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP47]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call 
void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP47]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42 -// CHECK1-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr 
[[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP50]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP50]] -// CHECK1: invoke.cont2: -// CHECK1-NEXT: [[TMP8:%.*]] = sext i8 [[CALL]] to i32 -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0 -// CHECK1-NEXT: call void 
@__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP8]], i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]], !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// 
CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP17:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP18:%.*]] = extractvalue { ptr, i32 } [[TMP17]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP18]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP50]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr 
[[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// 
CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP53]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP53]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK1-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat align 2 { -// CHECK1-NEXT: 
entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@main -// CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I7:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0) -// CHECK3-NEXT: [[CALL:%.*]] = invoke signext i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]]) -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK3-NEXT: store ptr @.str, ptr [[STR]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label 
[[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP2]] -// CHECK3: invoke.cont1: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] -// CHECK3: lpad: -// CHECK3-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: cleanup -// CHECK3-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0 -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1 -// CHECK3-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6:[0-9]+]] -// CHECK3-NEXT: br label [[EH_RESUME:%.*]] -// CHECK3: omp.inner.for.end: -// 
CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB5]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4 -// CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV6]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] -// CHECK3: omp.inner.for.cond8: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] -// CHECK3-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END17:%.*]] -// CHECK3: omp.inner.for.body10: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] -// CHECK3-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP6]] -// CHECK3: invoke.cont13: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]] -// CHECK3: omp.body.continue14: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] -// CHECK3: omp.inner.for.inc15: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK3-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]] -// CHECK3: omp.inner.for.end17: -// CHECK3-NEXT: store i32 100, ptr [[I7]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: [[CONV:%.*]] = sext i8 
[[TMP13]] to i32 -// CHECK3-NEXT: [[CALL19:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT18:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont18: -// CHECK3-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV]], [[CALL19]] -// CHECK3-NEXT: [[CALL22:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont21: -// CHECK3-NEXT: [[ADD23:%.*]] = add nsw i32 [[ADD20]], [[CALL22]] -// CHECK3-NEXT: store i32 [[ADD23]], ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6]] -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: ret i32 [[TMP14]] -// CHECK3: eh.resume: -// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK3-NEXT: [[LPAD_VAL24:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL24]] -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP16]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: 
[[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: call void @_Z8mayThrowv() -// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK3-NEXT: ret i8 [[CONV]] -// -// -// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK3-SAME: (ptr [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat { -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR6]] -// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK3-SAME: () #[[ATTR4:[0-9]+]] comdat personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I6:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 
@__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false) -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP9]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = 
load i32, ptr [[DOTOMP_LB3]], align 4 -// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] -// CHECK3: omp.inner.for.cond7: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]] -// CHECK3: omp.inner.for.body9: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 -// CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP12]] -// CHECK3: invoke.cont12: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]] -// CHECK3: omp.body.continue13: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] -// CHECK3: omp.inner.for.inc14: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]] -// CHECK3: omp.inner.for.end16: -// CHECK3-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK3-NEXT: ret i32 0 -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], 
!llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK3-SAME: () #[[ATTR4]] comdat personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I6:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false) -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group 
[[ACC_GRP15]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP15]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 -// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] -// CHECK3: omp.inner.for.cond7: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]] -// CHECK3: omp.inner.for.body9: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 -// CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP18]] 
-// CHECK3: invoke.cont12: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]] -// CHECK3: omp.body.continue13: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] -// CHECK3: omp.inner.for.inc14: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP19:![0-9]+]] -// CHECK3: omp.inner.for.end16: -// CHECK3-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK3-NEXT: ret i32 0 -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR6]] -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 
-// CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK3-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@main -// CHECK9-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK9-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK9-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0) -// CHECK9-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]]) -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind 
label [[LPAD:%.*]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK9-NEXT: store ptr @.str, ptr [[STR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54() #[[ATTR3:[0-9]+]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: lpad: -// CHECK9-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: cleanup -// CHECK9-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK9-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK9-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[EH_RESUME:%.*]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK9-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = load ptr, ptr [[STR]], align 8 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP19]], ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: store ptr [[TMP20]], ptr [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP29]], align 4 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 2, ptr [[TMP30]], align 4 -// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP27]], ptr [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP28]], ptr [[TMP32]], align 8 -// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes, ptr [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP36]], align 8 -// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP38]], align 8 -// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP39]], align 4 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP40]], align 4 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP41]], align 4 -// CHECK9-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 -// CHECK9-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: 
omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60(i64 [[TMP19]], ptr [[TMP20]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: [[TMP44:%.*]] = load i8, ptr [[A]], align 1 -// CHECK9-NEXT: [[CONV:%.*]] = sext i8 [[TMP44]] to i32 -// CHECK9-NEXT: [[CALL6:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv() -// CHECK9-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK9: invoke.cont5: -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK9-NEXT: [[CALL8:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv() -// CHECK9-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK9: invoke.cont7: -// CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK9-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR3]] -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP45]] -// CHECK9: eh.resume: -// CHECK9-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK9-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK9-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK9-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK9-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], 
align 8 -// CHECK9-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]]) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: call void @_Z8mayThrowv() -// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK9-NEXT: ret i8 [[CONV]] -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54 -// CHECK9-SAME: () #[[ATTR2:[0-9]+]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// 
CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 
[[TMP1]], i32 2, i32 1, ptr @.str.1), !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l54.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 
[[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 
[[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP19]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: 
[[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8:[0-9]+]], !llvm.access.group [[ACC_GRP19]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK9-SAME: (ptr [[TMP0:%.*]]) #[[ATTR5:[0-9]+]] comdat { -// CHECK9-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR3]] -// CHECK9-NEXT: call void @_ZSt9terminatev() #[[ATTR8]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60 -// CHECK9-SAME: (i64 [[A:%.*]], ptr [[STR:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: call void 
(ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined, ptr [[A_ADDR]], ptr [[STR_ADDR]]) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(1) [[A:%.*]], ptr nonnull align 8 dereferenceable(8) [[STR:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META24:![0-9]+]] -// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]], !align [[META25:![0-9]+]] -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 
-// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP0]], align 1, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[TMP10:%.*]] = sext i8 [[TMP9]] to i32 -// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP3]], i32 [[TMP10]], i32 2, ptr [[TMP11]]), !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, 
!llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined, i64 [[TMP13]], i64 [[TMP15]]), !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK9-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: 
[[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load 
i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP29]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: 
store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP29]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK9-SAME: () #[[ATTR6:[0-9]+]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false) -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = 
getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP21]], align 
4 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes.2, ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK9-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK9-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42(ptr [[STR]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: ret i32 0 -// -// -// CHECK9-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK9-SAME: () #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false) -// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP11]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP12]], align 4 -// 
CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK9: omp_offload.failed: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37() #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK9-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK9-NEXT: store ptr 
[[TMP19]], ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes.4, ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK9-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK9-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK9-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP31]], align 4 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK9-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK9-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK9-NEXT: 
[[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK9-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK9: omp_offload.failed3: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42(ptr [[STR]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK9: omp_offload.cont4: -// CHECK9-NEXT: ret i32 0 -// -// -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]] -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// 
CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 5, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l37.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = 
alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] 
= load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP35]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call 
void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP35]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42 -// CHECK9-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined, ptr [[TMP0]]) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load 
i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0 -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 -// CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l42.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = 
alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] 
= load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP41]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call 
void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP41]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37 -// CHECK9-SAME: () #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// 
CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 1, i32 2, ptr @.str.1), !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l37.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = 
alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] 
= load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP47]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call 
void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP47]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42 -// CHECK9-SAME: (ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined, ptr [[TMP0]]) -// CHECK9-NEXT: ret void -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META24]] -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr 
[[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: invoke void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 23) -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP50]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK9-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP50]] -// CHECK9: invoke.cont2: -// CHECK9-NEXT: [[TMP8:%.*]] = sext i8 [[CALL]] to i32 -// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP0]], i64 0, i64 0 -// CHECK9-NEXT: call void 
@__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP8]], i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR3]], !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// 
CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP17:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP18:%.*]] = extractvalue { ptr, i32 } [[TMP17]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP18]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP50]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l42.omp_outlined.omp_outlined -// CHECK9-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[DOTPREVIOUS_LB_:%.*]], i64 [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR2]] personality ptr @__gxx_personality_v0 { -// CHECK9-NEXT: entry: -// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr 
[[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK9: cond.true: -// CHECK9-NEXT: br label [[COND_END:%.*]] -// CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: br label [[COND_END]] -// CHECK9: cond.end: -// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// 
CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: invoke void @_Z3foov() -// CHECK9-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP53]] -// CHECK9: invoke.cont: -// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK9: omp.body.continue: -// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]] -// CHECK9: omp.inner.for.end: -// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK9: omp.loop.exit: -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK9: .omp.final.then: -// CHECK9-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK9: .omp.final.done: -// CHECK9-NEXT: ret void -// CHECK9: terminate.lpad: -// CHECK9-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK9-NEXT: catch ptr null -// CHECK9-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK9-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP53]] -// CHECK9-NEXT: unreachable -// -// -// CHECK9-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK9-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR6]] comdat { -// CHECK9-NEXT: entry: -// 
CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: ret void -// -// -// CHECK11-LABEL: define {{[^@]+}}@main -// CHECK11-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK11-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK11-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I7:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: call void @_ZN1SC1El(ptr nonnull align 8 dereferenceable(24) [[S]], i64 0) -// CHECK11-NEXT: [[CALL:%.*]] = invoke i8 @_ZN1ScvcEv(ptr nonnull align 8 dereferenceable(24) [[S]]) -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK11-NEXT: store ptr @.str, ptr [[STR]], align 8 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br 
label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT1:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP2]] -// CHECK11: invoke.cont1: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK11-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] -// CHECK11: lpad: -// CHECK11-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: cleanup -// CHECK11-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0 -// CHECK11-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1 -// CHECK11-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6:[0-9]+]] -// CHECK11-NEXT: br label [[EH_RESUME:%.*]] 
-// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB5]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4 -// CHECK11-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV6]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] -// CHECK11: omp.inner.for.cond8: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK11-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] -// CHECK11-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END17:%.*]] -// CHECK11: omp.inner.for.body10: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK11-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP11]], 1 -// CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] -// CHECK11-NEXT: store i32 [[ADD12]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP6]] -// CHECK11: invoke.cont13: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]] -// CHECK11: omp.body.continue14: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] -// CHECK11: omp.inner.for.inc15: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK11-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK11-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]] -// CHECK11: omp.inner.for.end17: -// CHECK11-NEXT: store i32 100, ptr [[I7]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load 
i8, ptr [[A]], align 1 -// CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32 -// CHECK11-NEXT: [[CALL19:%.*]] = invoke i32 @_Z5tmainIcLi5EEiv() -// CHECK11-NEXT: to label [[INVOKE_CONT18:%.*]] unwind label [[LPAD]] -// CHECK11: invoke.cont18: -// CHECK11-NEXT: [[ADD20:%.*]] = add nsw i32 [[CONV]], [[CALL19]] -// CHECK11-NEXT: [[CALL22:%.*]] = invoke i32 @_Z5tmainI1SLi1EEiv() -// CHECK11-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]] -// CHECK11: invoke.cont21: -// CHECK11-NEXT: [[ADD23:%.*]] = add nsw i32 [[ADD20]], [[CALL22]] -// CHECK11-NEXT: store i32 [[ADD23]], ptr [[RETVAL]], align 4 -// CHECK11-NEXT: call void @_ZN1SD1Ev(ptr nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6]] -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP14]] -// CHECK11: eh.resume: -// CHECK11-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK11-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK11-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK11-NEXT: [[LPAD_VAL24:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK11-NEXT: resume { ptr, i32 } [[LPAD_VAL24]] -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP16]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: unreachable -// -// -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// 
CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: call void @_ZN1SC2El(ptr nonnull align 8 dereferenceable(24) [[THIS1]], i64 [[TMP0]]) -// CHECK11-NEXT: ret void -// -// -// CHECK11-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR1]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: call void @_Z8mayThrowv() -// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK11-NEXT: ret i8 [[CONV]] -// -// -// CHECK11-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK11-SAME: (ptr [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] comdat { -// CHECK11-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR6]] -// CHECK11-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK11-NEXT: unreachable -// -// -// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK11-SAME: () #[[ATTR4:[0-9]+]] comdat personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: 
[[I6:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false) -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP9]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: store i32 100, ptr [[I]], align 4 -// 
CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 -// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] -// CHECK11: omp.inner.for.cond7: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK11-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]] -// CHECK11: omp.inner.for.body9: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK11-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 -// CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK11-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP12]] -// CHECK11: invoke.cont12: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]] -// CHECK11: omp.body.continue13: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] -// CHECK11: omp.inner.for.inc14: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]] -// CHECK11: omp.inner.for.end16: -// CHECK11-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK11-NEXT: ret i32 0 -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 } -// 
CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: unreachable -// -// -// CHECK11-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK11-SAME: () #[[ATTR4]] comdat personality ptr @__gxx_personality_v0 { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[I6:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false) -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] -// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP15]] -// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP15]] -// CHECK11: invoke.cont: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK11: omp.body.continue: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] -// CHECK11: omp.inner.for.end: -// CHECK11-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 -// CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 -// CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] -// CHECK11: omp.inner.for.cond7: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK11-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]] -// CHECK11: omp.inner.for.body9: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK11-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 -// CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK11-NEXT: 
store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK11-NEXT: invoke void @_Z3foov() -// CHECK11-NEXT: to label [[INVOKE_CONT12:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP18]] -// CHECK11: invoke.cont12: -// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]] -// CHECK11: omp.body.continue13: -// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] -// CHECK11: omp.inner.for.inc14: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP19:![0-9]+]] -// CHECK11: omp.inner.for.end16: -// CHECK11-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK11-NEXT: ret i32 0 -// CHECK11: terminate.lpad: -// CHECK11-NEXT: [[TMP10:%.*]] = landingpad { ptr, i32 } -// CHECK11-NEXT: catch ptr null -// CHECK11-NEXT: [[TMP11:%.*]] = extractvalue { ptr, i32 } [[TMP10]], 0 -// CHECK11-NEXT: call void @__clang_call_terminate(ptr [[TMP11]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP15]] -// CHECK11-NEXT: unreachable -// -// -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: call void @_ZN1SD2Ev(ptr nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR6]] -// CHECK11-NEXT: ret void -// -// -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 
-// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK11-NEXT: ret void -// -// -// CHECK11-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK11-SAME: (ptr nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat { -// CHECK11-NEXT: entry: -// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK11-NEXT: ret void -// diff --git a/clang/test/OpenMP/fuse_ast_print.cpp b/clang/test/OpenMP/fuse_ast_print.cpp new file mode 100644 index 0000000000000..283f5883c907d --- /dev/null +++ b/clang/test/OpenMP/fuse_ast_print.cpp @@ -0,0 +1,397 @@ +// Check no warnings/errors +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -fsyntax-only -verify %s +// expected-no-diagnostics + +// Check AST and unparsing +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -ast-dump %s | FileCheck %s --check-prefix=DUMP +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -ast-print %s | FileCheck %s --check-prefix=PRINT + +// Check same results after serialization round-trip +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -emit-pch -o %t %s +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 -fopenmp-version=60 -include-pch %t -ast-dump-all %s | FileCheck %s --check-prefix=DUMP +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -std=c++20 
-fopenmp-version=60 -include-pch %t -ast-print %s | FileCheck %s --check-prefix=PRINT + +#ifndef HEADER +#define HEADER + +// placeholder for loop body code +extern "C" void body(...); + +// PRINT-LABEL: void foo1( +// DUMP-LABEL: FunctionDecl {{.*}} foo1 +void foo1() { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + // PRINT: for (int k = 0; k <= 10; ++k) + // DUMP: ForStmt + for (int k = 0; k <= 10; ++k) + // PRINT: body(k) + // DUMP: CallExpr + body(k); + + } + +} + +// PRINT-LABEL: void foo2( +// DUMP-LABEL: FunctionDecl {{.*}} foo2 +void foo2() { + // PRINT: #pragma omp unroll partial(4) + // DUMP: OMPUnrollDirective + // DUMP-NEXT: OMPPartialClause + // DUMP-NEXT: ConstantExpr + // DUMP-NEXT: value: Int 4 + // DUMP-NEXT: IntegerLiteral {{.*}} 4 + #pragma omp unroll partial(4) + // PRINT: #pragma omp fuse + // DUMP-NEXT: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + } + +} + +//PRINT-LABEL: void foo3( +//DUMP-LABEL: FunctionTemplateDecl {{.*}} foo3 +template +void foo3() { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: #pragma omp unroll partial(Factor1) + // DUMP: OMPUnrollDirective + #pragma omp unroll partial(Factor1) + // PRINT: for (int i = 0; i < 12; i += 1) + // DUMP: ForStmt + for (int i = 0; i < 
12; i += 1) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: #pragma omp unroll partial(Factor2) + // DUMP: OMPUnrollDirective + #pragma omp unroll partial(Factor2) + // PRINT: for (int k = 0; k <= 10; ++k) + // DUMP: ForStmt + for (int k = 0; k <= 10; ++k) + // PRINT: body(k) + // DUMP: CallExpr + body(k); + + } +} + +// Also test instantiating the template. +void tfoo3() { + foo3<4,2>(); +} + +//PRINT-LABEL: void foo4( +//DUMP-LABEL: FunctionTemplateDecl {{.*}} foo4 +template +void foo4(int start, int end) { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (T i = start; i < end; i += Step) + // DUMP: ForStmt + for (T i = start; i < end; i += Step) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + + // PRINT: for (T j = end; j > start; j -= Step) + // DUMP: ForStmt + for (T j = end; j > start; j -= Step) { + // PRINT: body(j) + // DUMP: CallExpr + body(j); + } + + } +} + +// Also test instantiating the template. 
+void tfoo4() { + foo4(0, 64); +} + + + +// PRINT-LABEL: void foo5( +// DUMP-LABEL: FunctionDecl {{.*}} foo5 +void foo5() { + double arr[128], arr2[128]; + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT-NEXT: for (auto &&a : arr) + // DUMP-NEXT: CXXForRangeStmt + for (auto &&a: arr) + // PRINT: body(a) + // DUMP: CallExpr + body(a); + // PRINT: for (double v = 42; auto &&b : arr) + // DUMP: CXXForRangeStmt + for (double v = 42; auto &&b: arr) + // PRINT: body(b, v); + // DUMP: CallExpr + body(b, v); + // PRINT: for (auto &&c : arr2) + // DUMP: CXXForRangeStmt + for (auto &&c: arr2) + // PRINT: body(c) + // DUMP: CallExpr + body(c); + + } + +} + +// PRINT-LABEL: void foo6( +// DUMP-LABEL: FunctionDecl {{.*}} foo6 +void foo6() { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i <= 10; ++i) + // DUMP: ForStmt + for (int i = 0; i <= 10; ++i) + body(i); + // PRINT: for (int j = 0; j < 100; ++j) + // DUMP: ForStmt + for(int j = 0; j < 100; ++j) + body(j); + } + // PRINT: #pragma omp unroll partial(4) + // DUMP: OMPUnrollDirective + #pragma omp unroll partial(4) + // PRINT: for (int k = 0; k < 250; ++k) + // DUMP: ForStmt + for (int k = 0; k < 250; ++k) + body(k); + } +} + +// PRINT-LABEL: void foo7( +// DUMP-LABEL: FunctionDecl {{.*}} foo7 +void foo7() { + // PRINT: #pragma omp fuse + // DUMP: OMPFuseDirective + #pragma omp fuse + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 
0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + } + } + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int k = 0; k <= 10; ++k) + // DUMP: ForStmt + for (int k = 0; k <= 10; ++k) + // PRINT: body(k) + // DUMP: CallExpr + body(k); + } + } + } + } + +} + +// PRINT-LABEL: void foo8( +// DUMP-LABEL: FunctionDecl {{.*}} foo8 +void foo8() { + // PRINT: #pragma omp fuse looprange(2,2) + // DUMP: OMPFuseDirective + // DUMP: OMPLooprangeClause + #pragma omp fuse looprange(2,2) + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + // PRINT: for (int k = 0; k <= 10; ++k) + // DUMP: ForStmt + for (int k = 0; k <= 10; ++k) + // PRINT: body(k) + // DUMP: CallExpr + body(k); + + } + +} + +//PRINT-LABEL: void foo9( +//DUMP-LABEL: FunctionTemplateDecl {{.*}} foo9 +//DUMP-LABEL: NonTypeTemplateParmDecl {{.*}} F +//DUMP-LABEL: NonTypeTemplateParmDecl {{.*}} C +template +void foo9() { + // PRINT: #pragma omp fuse looprange(F,C) + // DUMP: OMPFuseDirective + // DUMP: OMPLooprangeClause + #pragma omp fuse looprange(F,C) + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + + } +} + +// Also test instantiating the template. 
+void tfoo9() { + foo9<1, 2>(); +} + +// PRINT-LABEL: void foo10( +// DUMP-LABEL: FunctionDecl {{.*}} foo10 +void foo10() { + // PRINT: #pragma omp fuse looprange(2,2) + // DUMP: OMPFuseDirective + // DUMP: OMPLooprangeClause + #pragma omp fuse looprange(2,2) + // PRINT: { + // DUMP: CompoundStmt + { + // PRINT: for (int i = 0; i < 10; i += 2) + // DUMP: ForStmt + for (int i = 0; i < 10; i += 2) + // PRINT: body(i) + // DUMP: CallExpr + body(i); + // PRINT: for (int ii = 0; ii < 10; ii += 2) + // DUMP: ForStmt + for (int ii = 0; ii < 10; ii += 2) + // PRINT: body(ii) + // DUMP: CallExpr + body(ii); + // PRINT: #pragma omp fuse looprange(2,2) + // DUMP: OMPFuseDirective + // DUMP: OMPLooprangeClause + #pragma omp fuse looprange(2,2) + { + // PRINT: for (int j = 10; j > 0; --j) + // DUMP: ForStmt + for (int j = 10; j > 0; --j) + // PRINT: body(j) + // DUMP: CallExpr + body(j); + // PRINT: for (int jj = 10; jj > 0; --jj) + // DUMP: ForStmt + for (int jj = 10; jj > 0; --jj) + // PRINT: body(jj) + // DUMP: CallExpr + body(jj); + // PRINT: for (int k = 0; k <= 10; ++k) + // DUMP: ForStmt + for (int k = 0; k <= 10; ++k) + // PRINT: body(k) + // DUMP: CallExpr + body(k); + // PRINT: for (int kk = 0; kk <= 10; ++kk) + // DUMP: ForStmt + for (int kk = 0; kk <= 10; ++kk) + // PRINT: body(kk) + // DUMP: CallExpr + body(kk); + } + } + +} + +#endif diff --git a/clang/test/OpenMP/fuse_codegen.cpp b/clang/test/OpenMP/fuse_codegen.cpp new file mode 100644 index 0000000000000..742c280ed0172 --- /dev/null +++ b/clang/test/OpenMP/fuse_codegen.cpp @@ -0,0 +1,2328 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --replace-value-regex "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ --version 5 +// expected-no-diagnostics + +// Check code generation +// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -std=c++20 -fclang-abi-compat=latest -fopenmp -fopenmp-version=60 -emit-llvm %s -o - | FileCheck %s 
--check-prefix=CHECK1 + +// Check same results after serialization round-trip +// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -std=c++20 -fclang-abi-compat=latest -fopenmp -fopenmp-version=60 -emit-pch -o %t %s +// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -std=c++20 -fclang-abi-compat=latest -fopenmp -fopenmp-version=60 -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK2 + +#ifndef HEADER +#define HEADER + +//placeholder for loop body code. +extern "C" void body(...) {} + +extern "C" void foo1(int start1, int end1, int step1, int start2, int end2, int step2) { + int i,j; + #pragma omp fuse + { + for(i = start1; i < end1; i += step1) body(i); + for(j = start2; j < end2; j += step2) body(j); + } + +} + +template +void foo2(T start, T end, T step){ + T i,j,k; + #pragma omp fuse + { + for(i = start; i < end; i += step) body(i); + for(j = end; j > start; j -= step) body(j); + for(k = start+step; k < end+step; k += step) body(k); + } +} + +extern "C" void tfoo2() { + foo2(0, 64, 4); +} + +extern "C" void foo3() { + double arr[256]; + #pragma omp fuse + { + #pragma omp fuse + { + for(int i = 0; i < 128; ++i) body(i); + for(int j = 0; j < 256; j+=2) body(j); + } + for(int c = 42; auto &&v: arr) body(c,v); + for(int cc = 37; auto &&vv: arr) body(cc, vv); + } +} + +extern "C" void foo4() { + double arr[256]; + + #pragma omp fuse looprange(2,2) + { + for(int i = 0; i < 128; ++i) body(i); + for(int j = 0; j < 256; j+=2) body(j); + for(int k = 0; k < 64; ++k) body(k); + for(int c = 42; auto &&v: arr) body(c,v); + } +} + +// This exemplifies the usage of loop transformations that generate +// more than top level canonical loop nests (e.g split, loopranged fuse...) 
+extern "C" void foo5() { + double arr[256]; + #pragma omp fuse looprange(2,2) + { + #pragma omp fuse looprange(2,2) + { + for(int i = 0; i < 128; ++i) body(i); + for(int j = 0; j < 256; j+=2) body(j); + for(int k = 0; k < 512; ++k) body(k); + } + for(int c = 42; auto &&v: arr) body(c,v); + for(int cc = 37; auto &&vv: arr) body(cc, vv); + } +} + + +#endif +// CHECK1-LABEL: define dso_local void @body( +// CHECK1-SAME: ...) #[[ATTR0:[0-9]+]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define dso_local void @foo1( +// CHECK1-SAME: i32 noundef [[START1:%.*]], i32 noundef [[END1:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[START2:%.*]], i32 noundef [[END2:%.*]], i32 noundef [[STEP2:%.*]]) #[[ATTR0]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: [[START1_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[END1_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[STEP1_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[START2_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[END2_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[STEP2_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTNEW_STEP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTNEW_STEP8:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: 
[[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store i32 [[START1]], ptr [[START1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[END1]], ptr [[END1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[STEP1]], ptr [[STEP1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[START2]], ptr [[START2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[END2]], ptr [[END2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[STEP2]], ptr [[STEP2_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[START1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[START1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[END1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP1_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]] +// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 +// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// 
CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[ADD5:%.*]] = add i32 [[TMP8]], 1 +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[START2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP9]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[START2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[END2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[STEP2_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP12]], ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[SUB10:%.*]] = sub i32 [[TMP13]], [[TMP14]] +// CHECK1-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], [[TMP15]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], [[TMP16]] +// CHECK1-NEXT: [[SUB14:%.*]] = sub i32 [[DIV13]], 1 +// CHECK1-NEXT: store i32 [[SUB14]], ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK1-NEXT: [[ADD15:%.*]] = add i32 [[TMP17]], 1 +// CHECK1-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_TEMP_1]], align 4 +// 
CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP19]], [[TMP20]] +// CHECK1-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK1: [[COND_TRUE]]: +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: br label %[[COND_END:.*]] +// CHECK1: [[COND_FALSE]]: +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: br label %[[COND_END]] +// CHECK1: [[COND_END]]: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP21]], %[[COND_TRUE]] ], [ [[TMP22]], %[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND:.*]] +// CHECK1: [[FOR_COND]]: +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: [[CMP16:%.*]] = icmp ult i32 [[TMP23]], [[TMP24]] +// CHECK1-NEXT: br i1 [[CMP16]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK1: [[FOR_BODY]]: +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[CMP17:%.*]] = icmp ult i32 [[TMP25]], [[TMP26]] +// CHECK1-NEXT: br i1 [[CMP17]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK1: [[IF_THEN]]: +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[ADD18:%.*]] = add i32 [[TMP27]], [[MUL]] +// CHECK1-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, 
ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[MUL19:%.*]] = mul i32 [[TMP31]], [[TMP32]] +// CHECK1-NEXT: [[ADD20:%.*]] = add i32 [[TMP30]], [[MUL19]] +// CHECK1-NEXT: store i32 [[ADD20]], ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP33]]) +// CHECK1-NEXT: br label %[[IF_END]] +// CHECK1: [[IF_END]]: +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP21:%.*]] = icmp ult i32 [[TMP34]], [[TMP35]] +// CHECK1-NEXT: br i1 [[CMP21]], label %[[IF_THEN22:.*]], label %[[IF_END27:.*]] +// CHECK1: [[IF_THEN22]]: +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL23:%.*]] = mul i32 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: [[ADD24:%.*]] = add i32 [[TMP36]], [[MUL23]] +// CHECK1-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[MUL25:%.*]] = mul i32 [[TMP40]], [[TMP41]] +// CHECK1-NEXT: [[ADD26:%.*]] = add i32 [[TMP39]], [[MUL25]] +// CHECK1-NEXT: store i32 [[ADD26]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[J]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP42]]) +// CHECK1-NEXT: br label %[[IF_END27]] +// CHECK1: [[IF_END27]]: +// CHECK1-NEXT: br label %[[FOR_INC:.*]] +// CHECK1: [[FOR_INC]]: +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[INC:%.*]] = add i32 [[TMP43]], 1 +// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK1: [[FOR_END]]: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define dso_local void @tfoo2( +// CHECK1-SAME: ) #[[ATTR0]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: call void @_Z4foo2IiEvT_S0_S0_(i32 noundef 0, i32 noundef 64, i32 noundef 4) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define linkonce_odr void @_Z4foo2IiEvT_S0_S0_( +// CHECK1-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR0]] comdat { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[STEP_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTNEW_STEP:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTNEW_STEP8:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 
+// CHECK1-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTNEW_STEP21:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store i32 [[START]], ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[STEP]], ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 +// CHECK1-NEXT: [[TMP6:%.*]] 
= load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]] +// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 +// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[ADD5:%.*]] = add i32 [[TMP8]], 1 +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP9]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP12]], ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK1-NEXT: [[SUB10:%.*]] = sub i32 [[TMP13]], [[TMP14]] +// CHECK1-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], [[TMP15]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], [[TMP16]] +// CHECK1-NEXT: [[SUB14:%.*]] = sub i32 [[DIV13]], 1 +// CHECK1-NEXT: store i32 [[SUB14]], ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// 
CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK1-NEXT: [[ADD15:%.*]] = add i32 [[TMP17]], 1 +// CHECK1-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK1-NEXT: store i32 [[ADD16]], ptr [[K]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: store i32 [[ADD18]], ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: [[ADD20:%.*]] = add nsw i32 [[TMP22]], [[TMP23]] +// CHECK1-NEXT: store i32 [[ADD20]], ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP24]], ptr [[DOTNEW_STEP21]], align 4 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK1-NEXT: [[SUB23:%.*]] = sub i32 [[TMP25]], [[TMP26]] +// CHECK1-NEXT: [[SUB24:%.*]] = sub i32 [[SUB23]], 1 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK1-NEXT: [[ADD25:%.*]] = add i32 [[SUB24]], [[TMP27]] +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK1-NEXT: [[DIV26:%.*]] = udiv i32 [[ADD25]], [[TMP28]] +// CHECK1-NEXT: [[SUB27:%.*]] = sub i32 [[DIV26]], 1 +// CHECK1-NEXT: store i32 [[SUB27]], ptr [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB2]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST2]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_22]], align 4 +// CHECK1-NEXT: [[ADD28:%.*]] = add i32 [[TMP29]], 1 +// CHECK1-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_NI2]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 [[TMP30]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP31]], [[TMP32]] +// CHECK1-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK1: [[COND_TRUE]]: +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: br label %[[COND_END:.*]] +// CHECK1: [[COND_FALSE]]: +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: br label %[[COND_END]] +// CHECK1: [[COND_END]]: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP33]], %[[COND_TRUE]] ], [ [[TMP34]], %[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK1-NEXT: [[CMP29:%.*]] = icmp ugt i32 [[TMP35]], [[TMP36]] +// CHECK1-NEXT: br i1 [[CMP29]], label %[[COND_TRUE30:.*]], label %[[COND_FALSE31:.*]] +// CHECK1: [[COND_TRUE30]]: +// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK1-NEXT: br label %[[COND_END32:.*]] +// CHECK1: [[COND_FALSE31]]: +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK1-NEXT: br label %[[COND_END32]] +// CHECK1: [[COND_END32]]: +// CHECK1-NEXT: [[COND33:%.*]] = phi i32 [ [[TMP37]], %[[COND_TRUE30]] ], [ [[TMP38]], %[[COND_FALSE31]] ] +// CHECK1-NEXT: store i32 [[COND33]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND:.*]] +// CHECK1: [[FOR_COND]]: +// 
CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: [[CMP34:%.*]] = icmp ult i32 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: br i1 [[CMP34]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK1: [[FOR_BODY]]: +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[CMP35:%.*]] = icmp ult i32 [[TMP41]], [[TMP42]] +// CHECK1-NEXT: br i1 [[CMP35]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK1: [[IF_THEN]]: +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP44]], [[TMP45]] +// CHECK1-NEXT: [[ADD36:%.*]] = add i32 [[TMP43]], [[MUL]] +// CHECK1-NEXT: store i32 [[ADD36]], ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK1-NEXT: [[MUL37:%.*]] = mul i32 [[TMP47]], [[TMP48]] +// CHECK1-NEXT: [[ADD38:%.*]] = add i32 [[TMP46]], [[MUL37]] +// CHECK1-NEXT: store i32 [[ADD38]], ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP49]]) +// CHECK1-NEXT: br label %[[IF_END]] +// CHECK1: [[IF_END]]: +// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP39:%.*]] = icmp ult i32 [[TMP50]], [[TMP51]] +// CHECK1-NEXT: br i1 [[CMP39]], label %[[IF_THEN40:.*]], label %[[IF_END45:.*]] +// CHECK1: [[IF_THEN40]]: +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL41:%.*]] = mul i32 [[TMP53]], [[TMP54]] +// CHECK1-NEXT: [[ADD42:%.*]] = add i32 [[TMP52]], [[MUL41]] +// CHECK1-NEXT: store i32 [[ADD42]], ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK1-NEXT: [[MUL43:%.*]] = mul i32 [[TMP56]], [[TMP57]] +// CHECK1-NEXT: [[SUB44:%.*]] = sub i32 [[TMP55]], [[MUL43]] +// CHECK1-NEXT: store i32 [[SUB44]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP58:%.*]] = load i32, ptr [[J]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP58]]) +// CHECK1-NEXT: br label %[[IF_END45]] +// CHECK1: [[IF_END45]]: +// CHECK1-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK1-NEXT: [[CMP46:%.*]] = icmp ult i32 [[TMP59]], [[TMP60]] +// CHECK1-NEXT: br i1 [[CMP46]], label %[[IF_THEN47:.*]], label %[[IF_END52:.*]] +// CHECK1: [[IF_THEN47]]: +// CHECK1-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTOMP_LB2]], align 4 +// CHECK1-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTOMP_ST2]], align 4 +// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL48:%.*]] = mul i32 [[TMP62]], [[TMP63]] +// CHECK1-NEXT: [[ADD49:%.*]] = add i32 [[TMP61]], [[MUL48]] +// CHECK1-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV2]], align 4 +// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK1-NEXT: [[TMP65:%.*]] = load i32, ptr [[DOTOMP_IV2]], align 4 +// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK1-NEXT: [[MUL50:%.*]] = mul i32 [[TMP65]], [[TMP66]] +// CHECK1-NEXT: [[ADD51:%.*]] = add i32 [[TMP64]], [[MUL50]] +// CHECK1-NEXT: store i32 [[ADD51]], ptr [[K]], align 4 +// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr [[K]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP67]]) +// CHECK1-NEXT: br label %[[IF_END52]] +// CHECK1: [[IF_END52]]: +// CHECK1-NEXT: br label %[[FOR_INC:.*]] +// CHECK1: [[FOR_INC]]: +// CHECK1-NEXT: [[TMP68:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[INC:%.*]] = add i32 [[TMP68]], 1 +// CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK1: [[FOR_END]]: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define dso_local void @foo3( +// CHECK1-SAME: ) #[[ATTR0]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB03:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST04:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI05:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV06:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// 
CHECK1-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_LB116:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_ST117:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_NI118:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV120:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[CC:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[__RANGE221:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__END222:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__BEGIN225:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_29:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_30:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_LB2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_ST2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_NI2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_TEMP_140:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_TEMP_2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX46:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX52:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[VV:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: store i32 128, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], 
ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK1-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK1: [[COND_TRUE]]: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: br label %[[COND_END:.*]] +// CHECK1: [[COND_FALSE]]: +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: br label %[[COND_END]] +// CHECK1: [[COND_END]]: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB03]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST04]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[ADD]] to i64 +// CHECK1-NEXT: store i64 [[CONV]], ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK1-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP8]], i64 0, i64 0 +// CHECK1-NEXT: 
[[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 256 +// CHECK1-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP9]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY7]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP10]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY9]], ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK1-NEXT: store ptr [[TMP11]], ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP12]] to i64 +// CHECK1-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]] +// CHECK1-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8 +// CHECK1-NEXT: [[SUB12:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1 +// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i64 [[SUB12]], 1 +// CHECK1-NEXT: [[DIV14:%.*]] = sdiv i64 [[ADD13]], 1 +// CHECK1-NEXT: [[SUB15:%.*]] = sub nsw i64 [[DIV14]], 1 +// CHECK1-NEXT: store i64 [[SUB15]], ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB116]], align 8 +// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_ST117]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK1-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP14]], 1 +// CHECK1-NEXT: store i64 [[ADD19]], ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: store i32 37, ptr [[CC]], align 4 +// CHECK1-NEXT: store ptr [[ARR]], ptr [[__RANGE221]], align 8 +// 
CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY23:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP15]], i64 0, i64 0 +// CHECK1-NEXT: [[ADD_PTR24:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY23]], i64 256 +// CHECK1-NEXT: store ptr [[ADD_PTR24]], ptr [[__END222]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY26:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP16]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY26]], ptr [[__BEGIN225]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY28:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP17]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY28]], ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__END222]], align 8 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTCAPTURE_EXPR_29]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_29]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK1-NEXT: [[SUB_PTR_LHS_CAST31:%.*]] = ptrtoint ptr [[TMP19]] to i64 +// CHECK1-NEXT: [[SUB_PTR_RHS_CAST32:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[SUB_PTR_SUB33:%.*]] = sub i64 [[SUB_PTR_LHS_CAST31]], [[SUB_PTR_RHS_CAST32]] +// CHECK1-NEXT: [[SUB_PTR_DIV34:%.*]] = sdiv exact i64 [[SUB_PTR_SUB33]], 8 +// CHECK1-NEXT: [[SUB35:%.*]] = sub nsw i64 [[SUB_PTR_DIV34]], 1 +// CHECK1-NEXT: [[ADD36:%.*]] = add nsw i64 [[SUB35]], 1 +// CHECK1-NEXT: [[DIV37:%.*]] = sdiv i64 [[ADD36]], 1 +// CHECK1-NEXT: [[SUB38:%.*]] = sub nsw i64 [[DIV37]], 1 +// CHECK1-NEXT: store i64 [[SUB38]], ptr [[DOTCAPTURE_EXPR_30]], align 8 +// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB2]], align 8 +// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_ST2]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_30]], align 8 +// CHECK1-NEXT: 
[[ADD39:%.*]] = add nsw i64 [[TMP21]], 1 +// CHECK1-NEXT: store i64 [[ADD39]], ptr [[DOTOMP_NI2]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: store i64 [[TMP22]], ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: [[CMP41:%.*]] = icmp sgt i64 [[TMP23]], [[TMP24]] +// CHECK1-NEXT: br i1 [[CMP41]], label %[[COND_TRUE42:.*]], label %[[COND_FALSE43:.*]] +// CHECK1: [[COND_TRUE42]]: +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK1-NEXT: br label %[[COND_END44:.*]] +// CHECK1: [[COND_FALSE43]]: +// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: br label %[[COND_END44]] +// CHECK1: [[COND_END44]]: +// CHECK1-NEXT: [[COND45:%.*]] = phi i64 [ [[TMP25]], %[[COND_TRUE42]] ], [ [[TMP26]], %[[COND_FALSE43]] ] +// CHECK1-NEXT: store i64 [[COND45]], ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK1-NEXT: [[CMP47:%.*]] = icmp sgt i64 [[TMP27]], [[TMP28]] +// CHECK1-NEXT: br i1 [[CMP47]], label %[[COND_TRUE48:.*]], label %[[COND_FALSE49:.*]] +// CHECK1: [[COND_TRUE48]]: +// CHECK1-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK1-NEXT: br label %[[COND_END50:.*]] +// CHECK1: [[COND_FALSE49]]: +// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK1-NEXT: br label %[[COND_END50]] +// CHECK1: [[COND_END50]]: +// CHECK1-NEXT: [[COND51:%.*]] = phi i64 [ [[TMP29]], %[[COND_TRUE48]] ], [ [[TMP30]], %[[COND_FALSE49]] ] +// CHECK1-NEXT: store i64 [[COND51]], ptr [[DOTOMP_FUSE_MAX46]], align 8 +// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND:.*]] +// CHECK1: [[FOR_COND]]: +// 
CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_FUSE_MAX46]], align 8 +// CHECK1-NEXT: [[CMP53:%.*]] = icmp slt i64 [[TMP31]], [[TMP32]] +// CHECK1-NEXT: br i1 [[CMP53]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK1: [[FOR_BODY]]: +// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: [[CMP54:%.*]] = icmp slt i64 [[TMP33]], [[TMP34]] +// CHECK1-NEXT: br i1 [[CMP54]], label %[[IF_THEN:.*]], label %[[IF_END74:.*]] +// CHECK1: [[IF_THEN]]: +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_LB03]], align 4 +// CHECK1-NEXT: [[CONV55:%.*]] = sext i32 [[TMP35]] to i64 +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_ST04]], align 4 +// CHECK1-NEXT: [[CONV56:%.*]] = sext i32 [[TMP36]] to i64 +// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV56]], [[TMP37]] +// CHECK1-NEXT: [[ADD57:%.*]] = add nsw i64 [[CONV55]], [[MUL]] +// CHECK1-NEXT: [[CONV58:%.*]] = trunc i64 [[ADD57]] to i32 +// CHECK1-NEXT: store i32 [[CONV58]], ptr [[DOTOMP_IV06]], align 4 +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV06]], align 4 +// CHECK1-NEXT: [[MUL59:%.*]] = mul nsw i32 [[TMP38]], 1 +// CHECK1-NEXT: [[ADD60:%.*]] = add nsw i32 0, [[MUL59]] +// CHECK1-NEXT: store i32 [[ADD60]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[CMP61:%.*]] = icmp slt i32 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: br i1 [[CMP61]], label %[[IF_THEN62:.*]], label %[[IF_END:.*]] +// CHECK1: [[IF_THEN62]]: +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: 
[[TMP43:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL63:%.*]] = mul nsw i32 [[TMP42]], [[TMP43]] +// CHECK1-NEXT: [[ADD64:%.*]] = add nsw i32 [[TMP41]], [[MUL63]] +// CHECK1-NEXT: store i32 [[ADD64]], ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[MUL65:%.*]] = mul nsw i32 [[TMP44]], 1 +// CHECK1-NEXT: [[ADD66:%.*]] = add nsw i32 0, [[MUL65]] +// CHECK1-NEXT: store i32 [[ADD66]], ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP45]]) +// CHECK1-NEXT: br label %[[IF_END]] +// CHECK1: [[IF_END]]: +// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP67:%.*]] = icmp slt i32 [[TMP46]], [[TMP47]] +// CHECK1-NEXT: br i1 [[CMP67]], label %[[IF_THEN68:.*]], label %[[IF_END73:.*]] +// CHECK1: [[IF_THEN68]]: +// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL69:%.*]] = mul nsw i32 [[TMP49]], [[TMP50]] +// CHECK1-NEXT: [[ADD70:%.*]] = add nsw i32 [[TMP48]], [[MUL69]] +// CHECK1-NEXT: store i32 [[ADD70]], ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[MUL71:%.*]] = mul nsw i32 [[TMP51]], 2 +// CHECK1-NEXT: [[ADD72:%.*]] = add nsw i32 0, [[MUL71]] +// CHECK1-NEXT: store i32 [[ADD72]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[J]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP52]]) +// CHECK1-NEXT: br label %[[IF_END73]] +// CHECK1: [[IF_END73]]: +// CHECK1-NEXT: br label %[[IF_END74]] +// CHECK1: [[IF_END74]]: +// CHECK1-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: [[CMP75:%.*]] = icmp slt i64 [[TMP53]], [[TMP54]] +// CHECK1-NEXT: br i1 [[CMP75]], label %[[IF_THEN76:.*]], label %[[IF_END81:.*]] +// CHECK1: [[IF_THEN76]]: +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_LB116]], align 8 +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_ST117]], align 8 +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[MUL77:%.*]] = mul nsw i64 [[TMP56]], [[TMP57]] +// CHECK1-NEXT: [[ADD78:%.*]] = add nsw i64 [[TMP55]], [[MUL77]] +// CHECK1-NEXT: store i64 [[ADD78]], ptr [[DOTOMP_IV120]], align 8 +// CHECK1-NEXT: [[TMP58:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_IV120]], align 8 +// CHECK1-NEXT: [[MUL79:%.*]] = mul nsw i64 [[TMP59]], 1 +// CHECK1-NEXT: [[ADD_PTR80:%.*]] = getelementptr inbounds double, ptr [[TMP58]], i64 [[MUL79]] +// CHECK1-NEXT: store ptr [[ADD_PTR80]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP60:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: store ptr [[TMP60]], ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP61:%.*]] = load i32, ptr [[C]], align 4 +// CHECK1-NEXT: [[TMP62:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP63:%.*]] = load double, ptr [[TMP62]], align 8 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP61]], double noundef [[TMP63]]) +// CHECK1-NEXT: br label %[[IF_END81]] +// CHECK1: [[IF_END81]]: +// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK1-NEXT: [[CMP82:%.*]] = icmp slt i64 [[TMP64]], [[TMP65]] +// CHECK1-NEXT: br i1 [[CMP82]], label %[[IF_THEN83:.*]], label %[[IF_END88:.*]] +// CHECK1: [[IF_THEN83]]: +// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[DOTOMP_LB2]], align 8 +// CHECK1-NEXT: [[TMP67:%.*]] = load i64, ptr [[DOTOMP_ST2]], align 8 +// CHECK1-NEXT: [[TMP68:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[MUL84:%.*]] = mul nsw i64 [[TMP67]], [[TMP68]] +// CHECK1-NEXT: [[ADD85:%.*]] = add nsw i64 [[TMP66]], [[MUL84]] +// CHECK1-NEXT: store i64 [[ADD85]], ptr [[DOTOMP_IV2]], align 8 +// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK1-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTOMP_IV2]], align 8 +// CHECK1-NEXT: [[MUL86:%.*]] = mul nsw i64 [[TMP70]], 1 +// CHECK1-NEXT: [[ADD_PTR87:%.*]] = getelementptr inbounds double, ptr [[TMP69]], i64 [[MUL86]] +// CHECK1-NEXT: store ptr [[ADD_PTR87]], ptr [[__BEGIN225]], align 8 +// CHECK1-NEXT: [[TMP71:%.*]] = load ptr, ptr [[__BEGIN225]], align 8 +// CHECK1-NEXT: store ptr [[TMP71]], ptr [[VV]], align 8 +// CHECK1-NEXT: [[TMP72:%.*]] = load i32, ptr [[CC]], align 4 +// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[VV]], align 8 +// CHECK1-NEXT: [[TMP74:%.*]] = load double, ptr [[TMP73]], align 8 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP72]], double noundef [[TMP74]]) +// CHECK1-NEXT: br label %[[IF_END88]] +// CHECK1: [[IF_END88]]: +// CHECK1-NEXT: br label %[[FOR_INC:.*]] +// CHECK1: [[FOR_INC]]: +// CHECK1-NEXT: [[TMP75:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: [[INC:%.*]] = add nsw i64 [[TMP75]], 1 +// CHECK1-NEXT: store i64 [[INC]], ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK1: [[FOR_END]]: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define dso_local void @foo4( +// CHECK1-SAME: ) #[[ATTR0]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// 
CHECK1-NEXT: store i32 0, ptr [[K]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: store i32 64, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK1-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK1: [[COND_TRUE]]: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: br label %[[COND_END:.*]] +// CHECK1: [[COND_FALSE]]: +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: br label %[[COND_END]] +// CHECK1: [[COND_END]]: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND:.*]] +// CHECK1: [[FOR_COND]]: +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP5]], 128 +// CHECK1-NEXT: br i1 [[CMP1]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK1: [[FOR_BODY]]: +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP6]]) +// CHECK1-NEXT: br label %[[FOR_INC:.*]] +// CHECK1: [[FOR_INC]]: +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK1-NEXT: store i32 [[INC]], ptr [[I]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK1: [[FOR_END]]: +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND2:.*]] +// CHECK1: [[FOR_COND2]]: +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: [[CMP3:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: br i1 [[CMP3]], label %[[FOR_BODY4:.*]], label %[[FOR_END17:.*]] +// CHECK1: [[FOR_BODY4]]: +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP10]], [[TMP11]] +// CHECK1-NEXT: br i1 [[CMP5]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK1: [[IF_THEN]]: +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP13]], [[TMP14]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[MUL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[MUL6:%.*]] = mul nsw i32 [[TMP15]], 2 +// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]] +// CHECK1-NEXT: store i32 [[ADD7]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP16]]) +// CHECK1-NEXT: br label %[[IF_END]] +// CHECK1: [[IF_END]]: +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP8:%.*]] = icmp slt i32 [[TMP17]], [[TMP18]] +// CHECK1-NEXT: br i1 [[CMP8]], label %[[IF_THEN9:.*]], label %[[IF_END14:.*]] +// CHECK1: [[IF_THEN9]]: +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP19]], [[MUL10]] +// CHECK1-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP22]], 1 +// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] +// CHECK1-NEXT: store i32 [[ADD13]], ptr [[K]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[K]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP23]]) +// CHECK1-NEXT: br label %[[IF_END14]] +// CHECK1: [[IF_END14]]: +// CHECK1-NEXT: br label %[[FOR_INC15:.*]] +// CHECK1: [[FOR_INC15]]: +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[INC16:%.*]] = add nsw i32 [[TMP24]], 1 +// CHECK1-NEXT: store i32 [[INC16]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND2]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK1: [[FOR_END17]]: +// CHECK1-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK1-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP25]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY18:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP26]], i64 0, i64 0 +// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY18]], i64 256 +// CHECK1-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND19:.*]] +// CHECK1: [[FOR_COND19]]: +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK1-NEXT: [[CMP20:%.*]] = icmp ne ptr [[TMP27]], [[TMP28]] +// CHECK1-NEXT: br i1 [[CMP20]], label %[[FOR_BODY21:.*]], label %[[FOR_END23:.*]] +// CHECK1: [[FOR_BODY21]]: +// CHECK1-NEXT: [[TMP29:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: store ptr [[TMP29]], ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[C]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = load double, ptr [[TMP31]], align 8 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP30]], double noundef [[TMP32]]) +// CHECK1-NEXT: br label %[[FOR_INC22:.*]] +// CHECK1: [[FOR_INC22]]: +// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds nuw double, ptr [[TMP33]], i32 1 +// CHECK1-NEXT: store ptr [[INCDEC_PTR]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND19]] +// CHECK1: [[FOR_END23]]: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define dso_local void @foo5( +// CHECK1-SAME: ) #[[ATTR0]] { +// CHECK1-NEXT: [[ENTRY:.*:]] +// CHECK1-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_LB03:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_ST04:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_NI05:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV06:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: 
[[DOTCAPTURE_EXPR_8:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_LB116:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_ST117:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_NI118:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IV120:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_TEMP_121:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_FUSE_MAX22:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_FUSE_INDEX29:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[CC:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[__RANGE264:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__BEGIN265:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[__END267:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[VV:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[K]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: store i32 512, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK1-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK1: [[COND_TRUE]]: +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK1-NEXT: br label 
%[[COND_END:.*]] +// CHECK1: [[COND_FALSE]]: +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: br label %[[COND_END]] +// CHECK1: [[COND_END]]: +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB03]], align 4 +// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_ST04]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[ADD]] to i64 +// CHECK1-NEXT: store i64 [[CONV]], ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK1-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP8]], i64 0, i64 0 +// CHECK1-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 256 +// CHECK1-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP9]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY7]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__RANGE2]], align 8 
+// CHECK1-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP10]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY9]], ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK1-NEXT: store ptr [[TMP11]], ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP12]] to i64 +// CHECK1-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]] +// CHECK1-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8 +// CHECK1-NEXT: [[SUB12:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1 +// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i64 [[SUB12]], 1 +// CHECK1-NEXT: [[DIV14:%.*]] = sdiv i64 [[ADD13]], 1 +// CHECK1-NEXT: [[SUB15:%.*]] = sub nsw i64 [[DIV14]], 1 +// CHECK1-NEXT: store i64 [[SUB15]], ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB116]], align 8 +// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_ST117]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK1-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP14]], 1 +// CHECK1-NEXT: store i64 [[ADD19]], ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: [[CMP23:%.*]] = icmp sgt i64 [[TMP16]], [[TMP17]] +// CHECK1-NEXT: br i1 [[CMP23]], label %[[COND_TRUE24:.*]], label %[[COND_FALSE25:.*]] +// CHECK1: [[COND_TRUE24]]: +// CHECK1-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK1-NEXT: br 
label %[[COND_END26:.*]] +// CHECK1: [[COND_FALSE25]]: +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: br label %[[COND_END26]] +// CHECK1: [[COND_END26]]: +// CHECK1-NEXT: [[COND27:%.*]] = phi i64 [ [[TMP18]], %[[COND_TRUE24]] ], [ [[TMP19]], %[[COND_FALSE25]] ] +// CHECK1-NEXT: store i64 [[COND27]], ptr [[DOTOMP_FUSE_MAX22]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND:.*]] +// CHECK1: [[FOR_COND]]: +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[CMP28:%.*]] = icmp slt i32 [[TMP20]], 128 +// CHECK1-NEXT: br i1 [[CMP28]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK1: [[FOR_BODY]]: +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: call void (...) @body(i32 noundef [[TMP21]]) +// CHECK1-NEXT: br label %[[FOR_INC:.*]] +// CHECK1: [[FOR_INC]]: +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP22]], 1 +// CHECK1-NEXT: store i32 [[INC]], ptr [[I]], align 4 +// CHECK1-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK1: [[FOR_END]]: +// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND30:.*]] +// CHECK1: [[FOR_COND30]]: +// CHECK1-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_FUSE_MAX22]], align 8 +// CHECK1-NEXT: [[CMP31:%.*]] = icmp slt i64 [[TMP23]], [[TMP24]] +// CHECK1-NEXT: br i1 [[CMP31]], label %[[FOR_BODY32:.*]], label %[[FOR_END63:.*]] +// CHECK1: [[FOR_BODY32]]: +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK1-NEXT: [[CMP33:%.*]] = icmp slt i64 [[TMP25]], [[TMP26]] +// CHECK1-NEXT: br i1 [[CMP33]], label %[[IF_THEN:.*]], label %[[IF_END53:.*]] +// CHECK1: [[IF_THEN]]: +// 
CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB03]], align 4 +// CHECK1-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_ST04]], align 4 +// CHECK1-NEXT: [[CONV35:%.*]] = sext i32 [[TMP28]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV35]], [[TMP29]] +// CHECK1-NEXT: [[ADD36:%.*]] = add nsw i64 [[CONV34]], [[MUL]] +// CHECK1-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 +// CHECK1-NEXT: store i32 [[CONV37]], ptr [[DOTOMP_IV06]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV06]], align 4 +// CHECK1-NEXT: [[MUL38:%.*]] = mul nsw i32 [[TMP30]], 1 +// CHECK1-NEXT: [[ADD39:%.*]] = add nsw i32 0, [[MUL38]] +// CHECK1-NEXT: store i32 [[ADD39]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK1-NEXT: [[CMP40:%.*]] = icmp slt i32 [[TMP31]], [[TMP32]] +// CHECK1-NEXT: br i1 [[CMP40]], label %[[IF_THEN41:.*]], label %[[IF_END:.*]] +// CHECK1: [[IF_THEN41]]: +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL42:%.*]] = mul nsw i32 [[TMP34]], [[TMP35]] +// CHECK1-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP33]], [[MUL42]] +// CHECK1-NEXT: store i32 [[ADD43]], ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK1-NEXT: [[MUL44:%.*]] = mul nsw i32 [[TMP36]], 2 +// CHECK1-NEXT: [[ADD45:%.*]] = add nsw i32 0, [[MUL44]] +// CHECK1-NEXT: store i32 [[ADD45]], ptr [[J]], align 4 +// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[J]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP37]]) +// CHECK1-NEXT: br label %[[IF_END]] +// CHECK1: [[IF_END]]: +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK1-NEXT: [[CMP46:%.*]] = icmp slt i32 [[TMP38]], [[TMP39]] +// CHECK1-NEXT: br i1 [[CMP46]], label %[[IF_THEN47:.*]], label %[[IF_END52:.*]] +// CHECK1: [[IF_THEN47]]: +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK1-NEXT: [[MUL48:%.*]] = mul nsw i32 [[TMP41]], [[TMP42]] +// CHECK1-NEXT: [[ADD49:%.*]] = add nsw i32 [[TMP40]], [[MUL48]] +// CHECK1-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK1-NEXT: [[MUL50:%.*]] = mul nsw i32 [[TMP43]], 1 +// CHECK1-NEXT: [[ADD51:%.*]] = add nsw i32 0, [[MUL50]] +// CHECK1-NEXT: store i32 [[ADD51]], ptr [[K]], align 4 +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[K]], align 4 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP44]]) +// CHECK1-NEXT: br label %[[IF_END52]] +// CHECK1: [[IF_END52]]: +// CHECK1-NEXT: br label %[[IF_END53]] +// CHECK1: [[IF_END53]]: +// CHECK1-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK1-NEXT: [[CMP54:%.*]] = icmp slt i64 [[TMP45]], [[TMP46]] +// CHECK1-NEXT: br i1 [[CMP54]], label %[[IF_THEN55:.*]], label %[[IF_END60:.*]] +// CHECK1: [[IF_THEN55]]: +// CHECK1-NEXT: [[TMP47:%.*]] = load i64, ptr [[DOTOMP_LB116]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTOMP_ST117]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[MUL56:%.*]] = mul nsw i64 [[TMP48]], [[TMP49]] +// CHECK1-NEXT: [[ADD57:%.*]] = add nsw i64 [[TMP47]], [[MUL56]] +// CHECK1-NEXT: store i64 [[ADD57]], ptr [[DOTOMP_IV120]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK1-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTOMP_IV120]], align 8 +// CHECK1-NEXT: [[MUL58:%.*]] = mul nsw i64 [[TMP51]], 1 +// CHECK1-NEXT: [[ADD_PTR59:%.*]] = getelementptr inbounds double, ptr [[TMP50]], i64 [[MUL58]] +// CHECK1-NEXT: store ptr [[ADD_PTR59]], ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: [[TMP52:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK1-NEXT: store ptr [[TMP52]], ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[C]], align 4 +// CHECK1-NEXT: [[TMP54:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK1-NEXT: [[TMP55:%.*]] = load double, ptr [[TMP54]], align 8 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP53]], double noundef [[TMP55]]) +// CHECK1-NEXT: br label %[[IF_END60]] +// CHECK1: [[IF_END60]]: +// CHECK1-NEXT: br label %[[FOR_INC61:.*]] +// CHECK1: [[FOR_INC61]]: +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: [[INC62:%.*]] = add nsw i64 [[TMP56]], 1 +// CHECK1-NEXT: store i64 [[INC62]], ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND30]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK1: [[FOR_END63]]: +// CHECK1-NEXT: store i32 37, ptr [[CC]], align 4 +// CHECK1-NEXT: store ptr [[ARR]], ptr [[__RANGE264]], align 8 +// CHECK1-NEXT: [[TMP57:%.*]] = load ptr, ptr [[__RANGE264]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY66:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP57]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARRAYDECAY66]], ptr [[__BEGIN265]], align 8 +// CHECK1-NEXT: [[TMP58:%.*]] = load ptr, ptr [[__RANGE264]], align 8 +// CHECK1-NEXT: [[ARRAYDECAY68:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP58]], i64 0, i64 0 +// CHECK1-NEXT: [[ADD_PTR69:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY68]], i64 256 +// CHECK1-NEXT: store ptr [[ADD_PTR69]], ptr [[__END267]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND70:.*]] +// CHECK1: [[FOR_COND70]]: +// CHECK1-NEXT: [[TMP59:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK1-NEXT: [[TMP60:%.*]] = load ptr, ptr [[__END267]], align 8 +// CHECK1-NEXT: [[CMP71:%.*]] = icmp ne ptr [[TMP59]], [[TMP60]] +// CHECK1-NEXT: br i1 [[CMP71]], label %[[FOR_BODY72:.*]], label %[[FOR_END74:.*]] +// CHECK1: [[FOR_BODY72]]: +// CHECK1-NEXT: [[TMP61:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK1-NEXT: store ptr [[TMP61]], ptr [[VV]], align 8 +// CHECK1-NEXT: [[TMP62:%.*]] = load i32, ptr [[CC]], align 4 +// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[VV]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = load double, ptr [[TMP63]], align 8 +// CHECK1-NEXT: call void (...) 
@body(i32 noundef [[TMP62]], double noundef [[TMP64]]) +// CHECK1-NEXT: br label %[[FOR_INC73:.*]] +// CHECK1: [[FOR_INC73]]: +// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK1-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds nuw double, ptr [[TMP65]], i32 1 +// CHECK1-NEXT: store ptr [[INCDEC_PTR]], ptr [[__BEGIN265]], align 8 +// CHECK1-NEXT: br label %[[FOR_COND70]] +// CHECK1: [[FOR_END74]]: +// CHECK1-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @body( +// CHECK2-SAME: ...) #[[ATTR0:[0-9]+]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @foo1( +// CHECK2-SAME: i32 noundef [[START1:%.*]], i32 noundef [[END1:%.*]], i32 noundef [[STEP1:%.*]], i32 noundef [[START2:%.*]], i32 noundef [[END2:%.*]], i32 noundef [[STEP2:%.*]]) #[[ATTR0]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: [[START1_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[END1_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[STEP1_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[START2_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[END2_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[STEP2_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTNEW_STEP:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTNEW_STEP8:%.*]] = alloca 
i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: store i32 [[START1]], ptr [[START1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[END1]], ptr [[END1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[STEP1]], ptr [[STEP1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[START2]], ptr [[START2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[END2]], ptr [[END2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[STEP2]], ptr [[STEP2_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[START1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], ptr [[I]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[START1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[END1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP1_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]] +// CHECK2-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]] +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]] +// CHECK2-NEXT: [[SUB4:%.*]] = sub 
i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: [[ADD5:%.*]] = add i32 [[TMP8]], 1 +// CHECK2-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[START2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP9]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[START2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[END2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[STEP2_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP12]], ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[SUB10:%.*]] = sub i32 [[TMP13]], [[TMP14]] +// CHECK2-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], [[TMP15]] +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], [[TMP16]] +// CHECK2-NEXT: [[SUB14:%.*]] = sub i32 [[DIV13]], 1 +// CHECK2-NEXT: store i32 [[SUB14]], ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK2-NEXT: [[ADD15:%.*]] = add i32 [[TMP17]], 1 +// CHECK2-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_NI0]], 
align 4 +// CHECK2-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP19]], [[TMP20]] +// CHECK2-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK2: [[COND_TRUE]]: +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: br label %[[COND_END:.*]] +// CHECK2: [[COND_FALSE]]: +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: br label %[[COND_END]] +// CHECK2: [[COND_END]]: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP21]], %[[COND_TRUE]] ], [ [[TMP22]], %[[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND:.*]] +// CHECK2: [[FOR_COND]]: +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: [[CMP16:%.*]] = icmp ult i32 [[TMP23]], [[TMP24]] +// CHECK2-NEXT: br i1 [[CMP16]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK2: [[FOR_BODY]]: +// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[CMP17:%.*]] = icmp ult i32 [[TMP25]], [[TMP26]] +// CHECK2-NEXT: br i1 [[CMP17]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK2: [[IF_THEN]]: +// CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL:%.*]] = mul i32 [[TMP28]], [[TMP29]] +// CHECK2-NEXT: [[ADD18:%.*]] = add i32 [[TMP27]], [[MUL]] +// CHECK2-NEXT: store i32 
[[ADD18]], ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[MUL19:%.*]] = mul i32 [[TMP31]], [[TMP32]] +// CHECK2-NEXT: [[ADD20:%.*]] = add i32 [[TMP30]], [[MUL19]] +// CHECK2-NEXT: store i32 [[ADD20]], ptr [[I]], align 4 +// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP33]]) +// CHECK2-NEXT: br label %[[IF_END]] +// CHECK2: [[IF_END]]: +// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP21:%.*]] = icmp ult i32 [[TMP34]], [[TMP35]] +// CHECK2-NEXT: br i1 [[CMP21]], label %[[IF_THEN22:.*]], label %[[IF_END27:.*]] +// CHECK2: [[IF_THEN22]]: +// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL23:%.*]] = mul i32 [[TMP37]], [[TMP38]] +// CHECK2-NEXT: [[ADD24:%.*]] = add i32 [[TMP36]], [[MUL23]] +// CHECK2-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[MUL25:%.*]] = mul i32 [[TMP40]], [[TMP41]] +// CHECK2-NEXT: [[ADD26:%.*]] = add i32 [[TMP39]], [[MUL25]] +// CHECK2-NEXT: store i32 [[ADD26]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[J]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP42]]) +// CHECK2-NEXT: br label %[[IF_END27]] +// CHECK2: [[IF_END27]]: +// CHECK2-NEXT: br label %[[FOR_INC:.*]] +// CHECK2: [[FOR_INC]]: +// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[INC:%.*]] = add i32 [[TMP43]], 1 +// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK2: [[FOR_END]]: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @foo3( +// CHECK2-SAME: ) #[[ATTR0]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB03:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST04:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI05:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_IV06:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// 
CHECK2-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_LB116:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_ST117:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_NI118:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_IV120:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[CC:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[__RANGE221:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__END222:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__BEGIN225:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_29:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_30:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_LB2:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_ST2:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_NI2:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_IV2:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_TEMP_140:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_TEMP_2:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX46:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX52:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[VV:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: store i32 128, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], 
ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK2-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK2: [[COND_TRUE]]: +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: br label %[[COND_END:.*]] +// CHECK2: [[COND_FALSE]]: +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: br label %[[COND_END]] +// CHECK2: [[COND_END]]: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB03]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST04]], align 4 +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK2-NEXT: [[CONV:%.*]] = sext i32 [[ADD]] to i64 +// CHECK2-NEXT: store i64 [[CONV]], ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP8]], i64 0, i64 0 +// CHECK2-NEXT: 
[[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 256 +// CHECK2-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP9]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY7]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP10]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY9]], ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK2-NEXT: store ptr [[TMP11]], ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP12]] to i64 +// CHECK2-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK2-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]] +// CHECK2-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8 +// CHECK2-NEXT: [[SUB12:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1 +// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i64 [[SUB12]], 1 +// CHECK2-NEXT: [[DIV14:%.*]] = sdiv i64 [[ADD13]], 1 +// CHECK2-NEXT: [[SUB15:%.*]] = sub nsw i64 [[DIV14]], 1 +// CHECK2-NEXT: store i64 [[SUB15]], ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_LB116]], align 8 +// CHECK2-NEXT: store i64 1, ptr [[DOTOMP_ST117]], align 8 +// CHECK2-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK2-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP14]], 1 +// CHECK2-NEXT: store i64 [[ADD19]], ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: store i32 37, ptr [[CC]], align 4 +// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE221]], align 8 +// 
CHECK2-NEXT: [[TMP15:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY23:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP15]], i64 0, i64 0 +// CHECK2-NEXT: [[ADD_PTR24:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY23]], i64 256 +// CHECK2-NEXT: store ptr [[ADD_PTR24]], ptr [[__END222]], align 8 +// CHECK2-NEXT: [[TMP16:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY26:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP16]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY26]], ptr [[__BEGIN225]], align 8 +// CHECK2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[__RANGE221]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY28:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP17]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY28]], ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__END222]], align 8 +// CHECK2-NEXT: store ptr [[TMP18]], ptr [[DOTCAPTURE_EXPR_29]], align 8 +// CHECK2-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_29]], align 8 +// CHECK2-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK2-NEXT: [[SUB_PTR_LHS_CAST31:%.*]] = ptrtoint ptr [[TMP19]] to i64 +// CHECK2-NEXT: [[SUB_PTR_RHS_CAST32:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK2-NEXT: [[SUB_PTR_SUB33:%.*]] = sub i64 [[SUB_PTR_LHS_CAST31]], [[SUB_PTR_RHS_CAST32]] +// CHECK2-NEXT: [[SUB_PTR_DIV34:%.*]] = sdiv exact i64 [[SUB_PTR_SUB33]], 8 +// CHECK2-NEXT: [[SUB35:%.*]] = sub nsw i64 [[SUB_PTR_DIV34]], 1 +// CHECK2-NEXT: [[ADD36:%.*]] = add nsw i64 [[SUB35]], 1 +// CHECK2-NEXT: [[DIV37:%.*]] = sdiv i64 [[ADD36]], 1 +// CHECK2-NEXT: [[SUB38:%.*]] = sub nsw i64 [[DIV37]], 1 +// CHECK2-NEXT: store i64 [[SUB38]], ptr [[DOTCAPTURE_EXPR_30]], align 8 +// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_LB2]], align 8 +// CHECK2-NEXT: store i64 1, ptr [[DOTOMP_ST2]], align 8 +// CHECK2-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_30]], align 8 +// CHECK2-NEXT: 
[[ADD39:%.*]] = add nsw i64 [[TMP21]], 1 +// CHECK2-NEXT: store i64 [[ADD39]], ptr [[DOTOMP_NI2]], align 8 +// CHECK2-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: store i64 [[TMP22]], ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK2-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK2-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: [[CMP41:%.*]] = icmp sgt i64 [[TMP23]], [[TMP24]] +// CHECK2-NEXT: br i1 [[CMP41]], label %[[COND_TRUE42:.*]], label %[[COND_FALSE43:.*]] +// CHECK2: [[COND_TRUE42]]: +// CHECK2-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_TEMP_140]], align 8 +// CHECK2-NEXT: br label %[[COND_END44:.*]] +// CHECK2: [[COND_FALSE43]]: +// CHECK2-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: br label %[[COND_END44]] +// CHECK2: [[COND_END44]]: +// CHECK2-NEXT: [[COND45:%.*]] = phi i64 [ [[TMP25]], %[[COND_TRUE42]] ], [ [[TMP26]], %[[COND_FALSE43]] ] +// CHECK2-NEXT: store i64 [[COND45]], ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK2-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK2-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK2-NEXT: [[CMP47:%.*]] = icmp sgt i64 [[TMP27]], [[TMP28]] +// CHECK2-NEXT: br i1 [[CMP47]], label %[[COND_TRUE48:.*]], label %[[COND_FALSE49:.*]] +// CHECK2: [[COND_TRUE48]]: +// CHECK2-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_TEMP_2]], align 8 +// CHECK2-NEXT: br label %[[COND_END50:.*]] +// CHECK2: [[COND_FALSE49]]: +// CHECK2-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK2-NEXT: br label %[[COND_END50]] +// CHECK2: [[COND_END50]]: +// CHECK2-NEXT: [[COND51:%.*]] = phi i64 [ [[TMP29]], %[[COND_TRUE48]] ], [ [[TMP30]], %[[COND_FALSE49]] ] +// CHECK2-NEXT: store i64 [[COND51]], ptr [[DOTOMP_FUSE_MAX46]], align 8 +// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND:.*]] +// CHECK2: [[FOR_COND]]: +// 
CHECK2-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_FUSE_MAX46]], align 8 +// CHECK2-NEXT: [[CMP53:%.*]] = icmp slt i64 [[TMP31]], [[TMP32]] +// CHECK2-NEXT: br i1 [[CMP53]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK2: [[FOR_BODY]]: +// CHECK2-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: [[CMP54:%.*]] = icmp slt i64 [[TMP33]], [[TMP34]] +// CHECK2-NEXT: br i1 [[CMP54]], label %[[IF_THEN:.*]], label %[[IF_END74:.*]] +// CHECK2: [[IF_THEN]]: +// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_LB03]], align 4 +// CHECK2-NEXT: [[CONV55:%.*]] = sext i32 [[TMP35]] to i64 +// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_ST04]], align 4 +// CHECK2-NEXT: [[CONV56:%.*]] = sext i32 [[TMP36]] to i64 +// CHECK2-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV56]], [[TMP37]] +// CHECK2-NEXT: [[ADD57:%.*]] = add nsw i64 [[CONV55]], [[MUL]] +// CHECK2-NEXT: [[CONV58:%.*]] = trunc i64 [[ADD57]] to i32 +// CHECK2-NEXT: store i32 [[CONV58]], ptr [[DOTOMP_IV06]], align 4 +// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV06]], align 4 +// CHECK2-NEXT: [[MUL59:%.*]] = mul nsw i32 [[TMP38]], 1 +// CHECK2-NEXT: [[ADD60:%.*]] = add nsw i32 0, [[MUL59]] +// CHECK2-NEXT: store i32 [[ADD60]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[CMP61:%.*]] = icmp slt i32 [[TMP39]], [[TMP40]] +// CHECK2-NEXT: br i1 [[CMP61]], label %[[IF_THEN62:.*]], label %[[IF_END:.*]] +// CHECK2: [[IF_THEN62]]: +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: 
[[TMP43:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL63:%.*]] = mul nsw i32 [[TMP42]], [[TMP43]] +// CHECK2-NEXT: [[ADD64:%.*]] = add nsw i32 [[TMP41]], [[MUL63]] +// CHECK2-NEXT: store i32 [[ADD64]], ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[MUL65:%.*]] = mul nsw i32 [[TMP44]], 1 +// CHECK2-NEXT: [[ADD66:%.*]] = add nsw i32 0, [[MUL65]] +// CHECK2-NEXT: store i32 [[ADD66]], ptr [[I]], align 4 +// CHECK2-NEXT: [[TMP45:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP45]]) +// CHECK2-NEXT: br label %[[IF_END]] +// CHECK2: [[IF_END]]: +// CHECK2-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP67:%.*]] = icmp slt i32 [[TMP46]], [[TMP47]] +// CHECK2-NEXT: br i1 [[CMP67]], label %[[IF_THEN68:.*]], label %[[IF_END73:.*]] +// CHECK2: [[IF_THEN68]]: +// CHECK2-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL69:%.*]] = mul nsw i32 [[TMP49]], [[TMP50]] +// CHECK2-NEXT: [[ADD70:%.*]] = add nsw i32 [[TMP48]], [[MUL69]] +// CHECK2-NEXT: store i32 [[ADD70]], ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[MUL71:%.*]] = mul nsw i32 [[TMP51]], 2 +// CHECK2-NEXT: [[ADD72:%.*]] = add nsw i32 0, [[MUL71]] +// CHECK2-NEXT: store i32 [[ADD72]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP52:%.*]] = load i32, ptr [[J]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP52]]) +// CHECK2-NEXT: br label %[[IF_END73]] +// CHECK2: [[IF_END73]]: +// CHECK2-NEXT: br label %[[IF_END74]] +// CHECK2: [[IF_END74]]: +// CHECK2-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: [[CMP75:%.*]] = icmp slt i64 [[TMP53]], [[TMP54]] +// CHECK2-NEXT: br i1 [[CMP75]], label %[[IF_THEN76:.*]], label %[[IF_END81:.*]] +// CHECK2: [[IF_THEN76]]: +// CHECK2-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_LB116]], align 8 +// CHECK2-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_ST117]], align 8 +// CHECK2-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[MUL77:%.*]] = mul nsw i64 [[TMP56]], [[TMP57]] +// CHECK2-NEXT: [[ADD78:%.*]] = add nsw i64 [[TMP55]], [[MUL77]] +// CHECK2-NEXT: store i64 [[ADD78]], ptr [[DOTOMP_IV120]], align 8 +// CHECK2-NEXT: [[TMP58:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_IV120]], align 8 +// CHECK2-NEXT: [[MUL79:%.*]] = mul nsw i64 [[TMP59]], 1 +// CHECK2-NEXT: [[ADD_PTR80:%.*]] = getelementptr inbounds double, ptr [[TMP58]], i64 [[MUL79]] +// CHECK2-NEXT: store ptr [[ADD_PTR80]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP60:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: store ptr [[TMP60]], ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP61:%.*]] = load i32, ptr [[C]], align 4 +// CHECK2-NEXT: [[TMP62:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP63:%.*]] = load double, ptr [[TMP62]], align 8 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP61]], double noundef [[TMP63]]) +// CHECK2-NEXT: br label %[[IF_END81]] +// CHECK2: [[IF_END81]]: +// CHECK2-NEXT: [[TMP64:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_NI2]], align 8 +// CHECK2-NEXT: [[CMP82:%.*]] = icmp slt i64 [[TMP64]], [[TMP65]] +// CHECK2-NEXT: br i1 [[CMP82]], label %[[IF_THEN83:.*]], label %[[IF_END88:.*]] +// CHECK2: [[IF_THEN83]]: +// CHECK2-NEXT: [[TMP66:%.*]] = load i64, ptr [[DOTOMP_LB2]], align 8 +// CHECK2-NEXT: [[TMP67:%.*]] = load i64, ptr [[DOTOMP_ST2]], align 8 +// CHECK2-NEXT: [[TMP68:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[MUL84:%.*]] = mul nsw i64 [[TMP67]], [[TMP68]] +// CHECK2-NEXT: [[ADD85:%.*]] = add nsw i64 [[TMP66]], [[MUL84]] +// CHECK2-NEXT: store i64 [[ADD85]], ptr [[DOTOMP_IV2]], align 8 +// CHECK2-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_27]], align 8 +// CHECK2-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTOMP_IV2]], align 8 +// CHECK2-NEXT: [[MUL86:%.*]] = mul nsw i64 [[TMP70]], 1 +// CHECK2-NEXT: [[ADD_PTR87:%.*]] = getelementptr inbounds double, ptr [[TMP69]], i64 [[MUL86]] +// CHECK2-NEXT: store ptr [[ADD_PTR87]], ptr [[__BEGIN225]], align 8 +// CHECK2-NEXT: [[TMP71:%.*]] = load ptr, ptr [[__BEGIN225]], align 8 +// CHECK2-NEXT: store ptr [[TMP71]], ptr [[VV]], align 8 +// CHECK2-NEXT: [[TMP72:%.*]] = load i32, ptr [[CC]], align 4 +// CHECK2-NEXT: [[TMP73:%.*]] = load ptr, ptr [[VV]], align 8 +// CHECK2-NEXT: [[TMP74:%.*]] = load double, ptr [[TMP73]], align 8 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP72]], double noundef [[TMP74]]) +// CHECK2-NEXT: br label %[[IF_END88]] +// CHECK2: [[IF_END88]]: +// CHECK2-NEXT: br label %[[FOR_INC:.*]] +// CHECK2: [[FOR_INC]]: +// CHECK2-NEXT: [[TMP75:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: [[INC:%.*]] = add nsw i64 [[TMP75]], 1 +// CHECK2-NEXT: store i64 [[INC]], ptr [[DOTOMP_FUSE_INDEX52]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK2: [[FOR_END]]: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @foo4( +// CHECK2-SAME: ) #[[ATTR0]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// 
CHECK2-NEXT: store i32 0, ptr [[K]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: store i32 64, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK2-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK2: [[COND_TRUE]]: +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: br label %[[COND_END:.*]] +// CHECK2: [[COND_FALSE]]: +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: br label %[[COND_END]] +// CHECK2: [[COND_END]]: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND:.*]] +// CHECK2: [[FOR_COND]]: +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP5]], 128 +// CHECK2-NEXT: br i1 [[CMP1]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK2: [[FOR_BODY]]: +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP6]]) +// CHECK2-NEXT: br label %[[FOR_INC:.*]] +// CHECK2: [[FOR_INC]]: +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK2-NEXT: store i32 [[INC]], ptr [[I]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK2: [[FOR_END]]: +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND2:.*]] +// CHECK2: [[FOR_COND2]]: +// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: [[CMP3:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]] +// CHECK2-NEXT: br i1 [[CMP3]], label %[[FOR_BODY4:.*]], label %[[FOR_END17:.*]] +// CHECK2: [[FOR_BODY4]]: +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP10]], [[TMP11]] +// CHECK2-NEXT: br i1 [[CMP5]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK2: [[IF_THEN]]: +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP13]], [[TMP14]] +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[MUL]] +// CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[MUL6:%.*]] = mul nsw i32 [[TMP15]], 2 +// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]] +// CHECK2-NEXT: store i32 [[ADD7]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP16]]) +// CHECK2-NEXT: br label %[[IF_END]] +// CHECK2: [[IF_END]]: +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP8:%.*]] = icmp slt i32 [[TMP17]], [[TMP18]] +// CHECK2-NEXT: br i1 [[CMP8]], label %[[IF_THEN9:.*]], label %[[IF_END14:.*]] +// CHECK2: [[IF_THEN9]]: +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP20]], [[TMP21]] +// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP19]], [[MUL10]] +// CHECK2-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP22]], 1 +// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] +// CHECK2-NEXT: store i32 [[ADD13]], ptr [[K]], align 4 +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[K]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP23]]) +// CHECK2-NEXT: br label %[[IF_END14]] +// CHECK2: [[IF_END14]]: +// CHECK2-NEXT: br label %[[FOR_INC15:.*]] +// CHECK2: [[FOR_INC15]]: +// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[INC16:%.*]] = add nsw i32 [[TMP24]], 1 +// CHECK2-NEXT: store i32 [[INC16]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND2]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK2: [[FOR_END17]]: +// CHECK2-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[TMP25:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP25]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP26:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY18:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP26]], i64 0, i64 0 +// CHECK2-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY18]], i64 256 +// CHECK2-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND19:.*]] +// CHECK2: [[FOR_COND19]]: +// CHECK2-NEXT: [[TMP27:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP28:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK2-NEXT: [[CMP20:%.*]] = icmp ne ptr [[TMP27]], [[TMP28]] +// CHECK2-NEXT: br i1 [[CMP20]], label %[[FOR_BODY21:.*]], label %[[FOR_END23:.*]] +// CHECK2: [[FOR_BODY21]]: +// CHECK2-NEXT: [[TMP29:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: store ptr [[TMP29]], ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[C]], align 4 +// CHECK2-NEXT: [[TMP31:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = load double, ptr [[TMP31]], align 8 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP30]], double noundef [[TMP32]]) +// CHECK2-NEXT: br label %[[FOR_INC22:.*]] +// CHECK2: [[FOR_INC22]]: +// CHECK2-NEXT: [[TMP33:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds nuw double, ptr [[TMP33]], i32 1 +// CHECK2-NEXT: store ptr [[INCDEC_PTR]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND19]] +// CHECK2: [[FOR_END23]]: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @foo5( +// CHECK2-SAME: ) #[[ATTR0]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: [[ARR:%.*]] = alloca [256 x double], align 16 +// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB03:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST04:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI05:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_IV06:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[C:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[__RANGE2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__END2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__BEGIN2:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: 
[[DOTCAPTURE_EXPR_8:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_LB116:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_ST117:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_NI118:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_IV120:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_TEMP_121:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX22:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX29:%.*]] = alloca i64, align 8 +// CHECK2-NEXT: [[V:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[CC:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[__RANGE264:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__BEGIN265:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[__END267:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: [[VV:%.*]] = alloca ptr, align 8 +// CHECK2-NEXT: store i32 0, ptr [[J]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: store i32 128, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[K]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: store i32 512, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP1]], [[TMP2]] +// CHECK2-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK2: [[COND_TRUE]]: +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: br label 
%[[COND_END:.*]] +// CHECK2: [[COND_FALSE]]: +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: br label %[[COND_END]] +// CHECK2: [[COND_END]]: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP3]], %[[COND_TRUE]] ], [ [[TMP4]], %[[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB03]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST04]], align 4 +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK2-NEXT: [[CONV:%.*]] = sext i32 [[ADD]] to i64 +// CHECK2-NEXT: store i64 [[CONV]], ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: store i32 42, ptr [[C]], align 4 +// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP8]], i64 0, i64 0 +// CHECK2-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY]], i64 256 +// CHECK2-NEXT: store ptr [[ADD_PTR]], ptr [[__END2]], align 8 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__RANGE2]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP9]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY7]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__RANGE2]], align 8 
+// CHECK2-NEXT: [[ARRAYDECAY9:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP10]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY9]], ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[__END2]], align 8 +// CHECK2-NEXT: store ptr [[TMP11]], ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_10]], align 8 +// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[SUB_PTR_LHS_CAST:%.*]] = ptrtoint ptr [[TMP12]] to i64 +// CHECK2-NEXT: [[SUB_PTR_RHS_CAST:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK2-NEXT: [[SUB_PTR_SUB:%.*]] = sub i64 [[SUB_PTR_LHS_CAST]], [[SUB_PTR_RHS_CAST]] +// CHECK2-NEXT: [[SUB_PTR_DIV:%.*]] = sdiv exact i64 [[SUB_PTR_SUB]], 8 +// CHECK2-NEXT: [[SUB12:%.*]] = sub nsw i64 [[SUB_PTR_DIV]], 1 +// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i64 [[SUB12]], 1 +// CHECK2-NEXT: [[DIV14:%.*]] = sdiv i64 [[ADD13]], 1 +// CHECK2-NEXT: [[SUB15:%.*]] = sub nsw i64 [[DIV14]], 1 +// CHECK2-NEXT: store i64 [[SUB15]], ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_LB116]], align 8 +// CHECK2-NEXT: store i64 1, ptr [[DOTOMP_ST117]], align 8 +// CHECK2-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_11]], align 8 +// CHECK2-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP14]], 1 +// CHECK2-NEXT: store i64 [[ADD19]], ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK2-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK2-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: [[CMP23:%.*]] = icmp sgt i64 [[TMP16]], [[TMP17]] +// CHECK2-NEXT: br i1 [[CMP23]], label %[[COND_TRUE24:.*]], label %[[COND_FALSE25:.*]] +// CHECK2: [[COND_TRUE24]]: +// CHECK2-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_TEMP_121]], align 8 +// CHECK2-NEXT: br 
label %[[COND_END26:.*]] +// CHECK2: [[COND_FALSE25]]: +// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: br label %[[COND_END26]] +// CHECK2: [[COND_END26]]: +// CHECK2-NEXT: [[COND27:%.*]] = phi i64 [ [[TMP18]], %[[COND_TRUE24]] ], [ [[TMP19]], %[[COND_FALSE25]] ] +// CHECK2-NEXT: store i64 [[COND27]], ptr [[DOTOMP_FUSE_MAX22]], align 8 +// CHECK2-NEXT: store i32 0, ptr [[I]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND:.*]] +// CHECK2: [[FOR_COND]]: +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: [[CMP28:%.*]] = icmp slt i32 [[TMP20]], 128 +// CHECK2-NEXT: br i1 [[CMP28]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK2: [[FOR_BODY]]: +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP21]]) +// CHECK2-NEXT: br label %[[FOR_INC:.*]] +// CHECK2: [[FOR_INC]]: +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP22]], 1 +// CHECK2-NEXT: store i32 [[INC]], ptr [[I]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK2: [[FOR_END]]: +// CHECK2-NEXT: store i64 0, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND30:.*]] +// CHECK2: [[FOR_COND30]]: +// CHECK2-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_FUSE_MAX22]], align 8 +// CHECK2-NEXT: [[CMP31:%.*]] = icmp slt i64 [[TMP23]], [[TMP24]] +// CHECK2-NEXT: br i1 [[CMP31]], label %[[FOR_BODY32:.*]], label %[[FOR_END63:.*]] +// CHECK2: [[FOR_BODY32]]: +// CHECK2-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_NI05]], align 8 +// CHECK2-NEXT: [[CMP33:%.*]] = icmp slt i64 [[TMP25]], [[TMP26]] +// CHECK2-NEXT: br i1 [[CMP33]], label %[[IF_THEN:.*]], label %[[IF_END53:.*]] +// CHECK2: [[IF_THEN]]: +// 
CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB03]], align 4 +// CHECK2-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64 +// CHECK2-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_ST04]], align 4 +// CHECK2-NEXT: [[CONV35:%.*]] = sext i32 [[TMP28]] to i64 +// CHECK2-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV35]], [[TMP29]] +// CHECK2-NEXT: [[ADD36:%.*]] = add nsw i64 [[CONV34]], [[MUL]] +// CHECK2-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 +// CHECK2-NEXT: store i32 [[CONV37]], ptr [[DOTOMP_IV06]], align 4 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV06]], align 4 +// CHECK2-NEXT: [[MUL38:%.*]] = mul nsw i32 [[TMP30]], 1 +// CHECK2-NEXT: [[ADD39:%.*]] = add nsw i32 0, [[MUL38]] +// CHECK2-NEXT: store i32 [[ADD39]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[CMP40:%.*]] = icmp slt i32 [[TMP31]], [[TMP32]] +// CHECK2-NEXT: br i1 [[CMP40]], label %[[IF_THEN41:.*]], label %[[IF_END:.*]] +// CHECK2: [[IF_THEN41]]: +// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL42:%.*]] = mul nsw i32 [[TMP34]], [[TMP35]] +// CHECK2-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP33]], [[MUL42]] +// CHECK2-NEXT: store i32 [[ADD43]], ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[MUL44:%.*]] = mul nsw i32 [[TMP36]], 2 +// CHECK2-NEXT: [[ADD45:%.*]] = add nsw i32 0, [[MUL44]] +// CHECK2-NEXT: store i32 [[ADD45]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[J]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP37]]) +// CHECK2-NEXT: br label %[[IF_END]] +// CHECK2: [[IF_END]]: +// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP46:%.*]] = icmp slt i32 [[TMP38]], [[TMP39]] +// CHECK2-NEXT: br i1 [[CMP46]], label %[[IF_THEN47:.*]], label %[[IF_END52:.*]] +// CHECK2: [[IF_THEN47]]: +// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL48:%.*]] = mul nsw i32 [[TMP41]], [[TMP42]] +// CHECK2-NEXT: [[ADD49:%.*]] = add nsw i32 [[TMP40]], [[MUL48]] +// CHECK2-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[MUL50:%.*]] = mul nsw i32 [[TMP43]], 1 +// CHECK2-NEXT: [[ADD51:%.*]] = add nsw i32 0, [[MUL50]] +// CHECK2-NEXT: store i32 [[ADD51]], ptr [[K]], align 4 +// CHECK2-NEXT: [[TMP44:%.*]] = load i32, ptr [[K]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP44]]) +// CHECK2-NEXT: br label %[[IF_END52]] +// CHECK2: [[IF_END52]]: +// CHECK2-NEXT: br label %[[IF_END53]] +// CHECK2: [[IF_END53]]: +// CHECK2-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTOMP_NI118]], align 8 +// CHECK2-NEXT: [[CMP54:%.*]] = icmp slt i64 [[TMP45]], [[TMP46]] +// CHECK2-NEXT: br i1 [[CMP54]], label %[[IF_THEN55:.*]], label %[[IF_END60:.*]] +// CHECK2: [[IF_THEN55]]: +// CHECK2-NEXT: [[TMP47:%.*]] = load i64, ptr [[DOTOMP_LB116]], align 8 +// CHECK2-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTOMP_ST117]], align 8 +// CHECK2-NEXT: [[TMP49:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[MUL56:%.*]] = mul nsw i64 [[TMP48]], [[TMP49]] +// CHECK2-NEXT: [[ADD57:%.*]] = add nsw i64 [[TMP47]], [[MUL56]] +// CHECK2-NEXT: store i64 [[ADD57]], ptr [[DOTOMP_IV120]], align 8 +// CHECK2-NEXT: [[TMP50:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_8]], align 8 +// CHECK2-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTOMP_IV120]], align 8 +// CHECK2-NEXT: [[MUL58:%.*]] = mul nsw i64 [[TMP51]], 1 +// CHECK2-NEXT: [[ADD_PTR59:%.*]] = getelementptr inbounds double, ptr [[TMP50]], i64 [[MUL58]] +// CHECK2-NEXT: store ptr [[ADD_PTR59]], ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: [[TMP52:%.*]] = load ptr, ptr [[__BEGIN2]], align 8 +// CHECK2-NEXT: store ptr [[TMP52]], ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP53:%.*]] = load i32, ptr [[C]], align 4 +// CHECK2-NEXT: [[TMP54:%.*]] = load ptr, ptr [[V]], align 8 +// CHECK2-NEXT: [[TMP55:%.*]] = load double, ptr [[TMP54]], align 8 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP53]], double noundef [[TMP55]]) +// CHECK2-NEXT: br label %[[IF_END60]] +// CHECK2: [[IF_END60]]: +// CHECK2-NEXT: br label %[[FOR_INC61:.*]] +// CHECK2: [[FOR_INC61]]: +// CHECK2-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: [[INC62:%.*]] = add nsw i64 [[TMP56]], 1 +// CHECK2-NEXT: store i64 [[INC62]], ptr [[DOTOMP_FUSE_INDEX29]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND30]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK2: [[FOR_END63]]: +// CHECK2-NEXT: store i32 37, ptr [[CC]], align 4 +// CHECK2-NEXT: store ptr [[ARR]], ptr [[__RANGE264]], align 8 +// CHECK2-NEXT: [[TMP57:%.*]] = load ptr, ptr [[__RANGE264]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY66:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP57]], i64 0, i64 0 +// CHECK2-NEXT: store ptr [[ARRAYDECAY66]], ptr [[__BEGIN265]], align 8 +// CHECK2-NEXT: [[TMP58:%.*]] = load ptr, ptr [[__RANGE264]], align 8 +// CHECK2-NEXT: [[ARRAYDECAY68:%.*]] = getelementptr inbounds [256 x double], ptr [[TMP58]], i64 0, i64 0 +// CHECK2-NEXT: [[ADD_PTR69:%.*]] = getelementptr inbounds double, ptr [[ARRAYDECAY68]], i64 256 +// CHECK2-NEXT: store ptr [[ADD_PTR69]], ptr [[__END267]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND70:.*]] +// CHECK2: [[FOR_COND70]]: +// CHECK2-NEXT: [[TMP59:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK2-NEXT: [[TMP60:%.*]] = load ptr, ptr [[__END267]], align 8 +// CHECK2-NEXT: [[CMP71:%.*]] = icmp ne ptr [[TMP59]], [[TMP60]] +// CHECK2-NEXT: br i1 [[CMP71]], label %[[FOR_BODY72:.*]], label %[[FOR_END74:.*]] +// CHECK2: [[FOR_BODY72]]: +// CHECK2-NEXT: [[TMP61:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK2-NEXT: store ptr [[TMP61]], ptr [[VV]], align 8 +// CHECK2-NEXT: [[TMP62:%.*]] = load i32, ptr [[CC]], align 4 +// CHECK2-NEXT: [[TMP63:%.*]] = load ptr, ptr [[VV]], align 8 +// CHECK2-NEXT: [[TMP64:%.*]] = load double, ptr [[TMP63]], align 8 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP62]], double noundef [[TMP64]]) +// CHECK2-NEXT: br label %[[FOR_INC73:.*]] +// CHECK2: [[FOR_INC73]]: +// CHECK2-NEXT: [[TMP65:%.*]] = load ptr, ptr [[__BEGIN265]], align 8 +// CHECK2-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds nuw double, ptr [[TMP65]], i32 1 +// CHECK2-NEXT: store ptr [[INCDEC_PTR]], ptr [[__BEGIN265]], align 8 +// CHECK2-NEXT: br label %[[FOR_COND70]] +// CHECK2: [[FOR_END74]]: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define dso_local void @tfoo2( +// CHECK2-SAME: ) #[[ATTR0]] { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: call void @_Z4foo2IiEvT_S0_S0_(i32 noundef 0, i32 noundef 64, i32 noundef 4) +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define linkonce_odr void @_Z4foo2IiEvT_S0_S0_( +// CHECK2-SAME: i32 noundef [[START:%.*]], i32 noundef [[END:%.*]], i32 noundef [[STEP:%.*]]) #[[ATTR0]] comdat { +// CHECK2-NEXT: [[ENTRY:.*:]] +// CHECK2-NEXT: [[START_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[END_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[STEP_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTNEW_STEP:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV0:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_7:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTNEW_STEP8:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: 
[[DOTOMP_LB1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_19:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTNEW_STEP21:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_ST2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_NI2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IV2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_TEMP_2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_MAX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_FUSE_INDEX:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: store i32 [[START]], ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[END]], ptr [[END_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[STEP]], ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], ptr [[I]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP3]], ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], [[TMP5]] +// CHECK2-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr 
[[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], [[TMP6]] +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP7]] +// CHECK2-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: [[ADD5:%.*]] = add i32 [[TMP8]], 1 +// CHECK2-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP9]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP12]], ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_7]], align 4 +// CHECK2-NEXT: [[SUB10:%.*]] = sub i32 [[TMP13]], [[TMP14]] +// CHECK2-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], [[TMP15]] +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[DIV13:%.*]] = udiv i32 [[ADD12]], [[TMP16]] +// CHECK2-NEXT: [[SUB14:%.*]] = sub i32 [[DIV13]], 1 +// CHECK2-NEXT: store i32 [[SUB14]], ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: 
[[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK2-NEXT: [[ADD15:%.*]] = add i32 [[TMP17]], 1 +// CHECK2-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK2-NEXT: store i32 [[ADD16]], ptr [[K]], align 4 +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[START_ADDR]], align 4 +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK2-NEXT: store i32 [[ADD18]], ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[END_ADDR]], align 4 +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: [[ADD20:%.*]] = add nsw i32 [[TMP22]], [[TMP23]] +// CHECK2-NEXT: store i32 [[ADD20]], ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[STEP_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP24]], ptr [[DOTNEW_STEP21]], align 4 +// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK2-NEXT: [[SUB23:%.*]] = sub i32 [[TMP25]], [[TMP26]] +// CHECK2-NEXT: [[SUB24:%.*]] = sub i32 [[SUB23]], 1 +// CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK2-NEXT: [[ADD25:%.*]] = add i32 [[SUB24]], [[TMP27]] +// CHECK2-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK2-NEXT: [[DIV26:%.*]] = udiv i32 [[ADD25]], [[TMP28]] +// CHECK2-NEXT: [[SUB27:%.*]] = sub i32 [[DIV26]], 1 +// CHECK2-NEXT: store i32 [[SUB27]], ptr [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB2]], align 4 +// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_ST2]], align 4 +// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_22]], align 4 +// CHECK2-NEXT: [[ADD28:%.*]] = add i32 [[TMP29]], 1 +// CHECK2-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_NI2]], align 4 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: store i32 [[TMP30]], ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP31]], [[TMP32]] +// CHECK2-NEXT: br i1 [[CMP]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]] +// CHECK2: [[COND_TRUE]]: +// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_TEMP_1]], align 4 +// CHECK2-NEXT: br label %[[COND_END:.*]] +// CHECK2: [[COND_FALSE]]: +// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: br label %[[COND_END]] +// CHECK2: [[COND_END]]: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP33]], %[[COND_TRUE]] ], [ [[TMP34]], %[[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK2-NEXT: [[CMP29:%.*]] = icmp ugt i32 [[TMP35]], [[TMP36]] +// CHECK2-NEXT: br i1 [[CMP29]], label %[[COND_TRUE30:.*]], label %[[COND_FALSE31:.*]] +// CHECK2: [[COND_TRUE30]]: +// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_TEMP_2]], align 4 +// CHECK2-NEXT: br label %[[COND_END32:.*]] +// CHECK2: [[COND_FALSE31]]: +// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK2-NEXT: br label %[[COND_END32]] +// CHECK2: [[COND_END32]]: +// CHECK2-NEXT: [[COND33:%.*]] = phi i32 [ [[TMP37]], %[[COND_TRUE30]] ], [ [[TMP38]], %[[COND_FALSE31]] ] +// CHECK2-NEXT: store i32 [[COND33]], ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND:.*]] +// CHECK2: [[FOR_COND]]: +// 
CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_FUSE_MAX]], align 4 +// CHECK2-NEXT: [[CMP34:%.*]] = icmp ult i32 [[TMP39]], [[TMP40]] +// CHECK2-NEXT: br i1 [[CMP34]], label %[[FOR_BODY:.*]], label %[[FOR_END:.*]] +// CHECK2: [[FOR_BODY]]: +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_NI0]], align 4 +// CHECK2-NEXT: [[CMP35:%.*]] = icmp ult i32 [[TMP41]], [[TMP42]] +// CHECK2-NEXT: br i1 [[CMP35]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +// CHECK2: [[IF_THEN]]: +// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_LB0]], align 4 +// CHECK2-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_ST0]], align 4 +// CHECK2-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL:%.*]] = mul i32 [[TMP44]], [[TMP45]] +// CHECK2-NEXT: [[ADD36:%.*]] = add i32 [[TMP43]], [[MUL]] +// CHECK2-NEXT: store i32 [[ADD36]], ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_IV0]], align 4 +// CHECK2-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTNEW_STEP]], align 4 +// CHECK2-NEXT: [[MUL37:%.*]] = mul i32 [[TMP47]], [[TMP48]] +// CHECK2-NEXT: [[ADD38:%.*]] = add i32 [[TMP46]], [[MUL37]] +// CHECK2-NEXT: store i32 [[ADD38]], ptr [[I]], align 4 +// CHECK2-NEXT: [[TMP49:%.*]] = load i32, ptr [[I]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP49]]) +// CHECK2-NEXT: br label %[[IF_END]] +// CHECK2: [[IF_END]]: +// CHECK2-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTOMP_NI1]], align 4 +// CHECK2-NEXT: [[CMP39:%.*]] = icmp ult i32 [[TMP50]], [[TMP51]] +// CHECK2-NEXT: br i1 [[CMP39]], label %[[IF_THEN40:.*]], label %[[IF_END45:.*]] +// CHECK2: [[IF_THEN40]]: +// CHECK2-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTOMP_LB1]], align 4 +// CHECK2-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTOMP_ST1]], align 4 +// CHECK2-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL41:%.*]] = mul i32 [[TMP53]], [[TMP54]] +// CHECK2-NEXT: [[ADD42:%.*]] = add i32 [[TMP52]], [[MUL41]] +// CHECK2-NEXT: store i32 [[ADD42]], ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 +// CHECK2-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTOMP_IV1]], align 4 +// CHECK2-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTNEW_STEP8]], align 4 +// CHECK2-NEXT: [[MUL43:%.*]] = mul i32 [[TMP56]], [[TMP57]] +// CHECK2-NEXT: [[SUB44:%.*]] = sub i32 [[TMP55]], [[MUL43]] +// CHECK2-NEXT: store i32 [[SUB44]], ptr [[J]], align 4 +// CHECK2-NEXT: [[TMP58:%.*]] = load i32, ptr [[J]], align 4 +// CHECK2-NEXT: call void (...) 
@body(i32 noundef [[TMP58]]) +// CHECK2-NEXT: br label %[[IF_END45]] +// CHECK2: [[IF_END45]]: +// CHECK2-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTOMP_NI2]], align 4 +// CHECK2-NEXT: [[CMP46:%.*]] = icmp ult i32 [[TMP59]], [[TMP60]] +// CHECK2-NEXT: br i1 [[CMP46]], label %[[IF_THEN47:.*]], label %[[IF_END52:.*]] +// CHECK2: [[IF_THEN47]]: +// CHECK2-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTOMP_LB2]], align 4 +// CHECK2-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTOMP_ST2]], align 4 +// CHECK2-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[MUL48:%.*]] = mul i32 [[TMP62]], [[TMP63]] +// CHECK2-NEXT: [[ADD49:%.*]] = add i32 [[TMP61]], [[MUL48]] +// CHECK2-NEXT: store i32 [[ADD49]], ptr [[DOTOMP_IV2]], align 4 +// CHECK2-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK2-NEXT: [[TMP65:%.*]] = load i32, ptr [[DOTOMP_IV2]], align 4 +// CHECK2-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTNEW_STEP21]], align 4 +// CHECK2-NEXT: [[MUL50:%.*]] = mul i32 [[TMP65]], [[TMP66]] +// CHECK2-NEXT: [[ADD51:%.*]] = add i32 [[TMP64]], [[MUL50]] +// CHECK2-NEXT: store i32 [[ADD51]], ptr [[K]], align 4 +// CHECK2-NEXT: [[TMP67:%.*]] = load i32, ptr [[K]], align 4 +// CHECK2-NEXT: call void (...) @body(i32 noundef [[TMP67]]) +// CHECK2-NEXT: br label %[[IF_END52]] +// CHECK2: [[IF_END52]]: +// CHECK2-NEXT: br label %[[FOR_INC:.*]] +// CHECK2: [[FOR_INC]]: +// CHECK2-NEXT: [[TMP68:%.*]] = load i32, ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: [[INC:%.*]] = add i32 [[TMP68]], 1 +// CHECK2-NEXT: store i32 [[INC]], ptr [[DOTOMP_FUSE_INDEX]], align 4 +// CHECK2-NEXT: br label %[[FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK2: [[FOR_END]]: +// CHECK2-NEXT: ret void +// +//. 
+// CHECK1: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} +// CHECK1: [[META4]] = !{!"llvm.loop.mustprogress"} +// CHECK1: [[LOOP5]] = distinct !{[[LOOP5]], [[META4]]} +// CHECK1: [[LOOP6]] = distinct !{[[LOOP6]], [[META4]]} +// CHECK1: [[LOOP7]] = distinct !{[[LOOP7]], [[META4]]} +// CHECK1: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]} +// CHECK1: [[LOOP9]] = distinct !{[[LOOP9]], [[META4]]} +// CHECK1: [[LOOP10]] = distinct !{[[LOOP10]], [[META4]]} +//. +// CHECK2: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} +// CHECK2: [[META4]] = !{!"llvm.loop.mustprogress"} +// CHECK2: [[LOOP5]] = distinct !{[[LOOP5]], [[META4]]} +// CHECK2: [[LOOP6]] = distinct !{[[LOOP6]], [[META4]]} +// CHECK2: [[LOOP7]] = distinct !{[[LOOP7]], [[META4]]} +// CHECK2: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]} +// CHECK2: [[LOOP9]] = distinct !{[[LOOP9]], [[META4]]} +// CHECK2: [[LOOP10]] = distinct !{[[LOOP10]], [[META4]]} +//. diff --git a/clang/test/OpenMP/fuse_messages.cpp b/clang/test/OpenMP/fuse_messages.cpp new file mode 100644 index 0000000000000..b86ce95cfe9bc --- /dev/null +++ b/clang/test/OpenMP/fuse_messages.cpp @@ -0,0 +1,209 @@ +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -std=c++20 -fopenmp -fopenmp-version=60 -fsyntax-only -Wuninitialized -verify %s + +void func() { + + // expected-error@+2 {{statement after '#pragma omp fuse' must be a loop sequence containing canonical loops or loop-generating constructs}} + #pragma omp fuse + ; + + // expected-error@+2 {{statement after '#pragma omp fuse' must be a for loop}} + #pragma omp fuse + {int bar = 0;} + + // expected-error@+4 {{statement after '#pragma omp fuse' must be a for loop}} + #pragma omp fuse + { + for(int i = 0; i < 10; ++i); + int x = 2; + } + + // expected-error@+2 {{statement after '#pragma omp fuse' must be a loop sequence containing canonical loops or loop-generating constructs}} + #pragma omp fuse + #pragma omp for + for (int i = 0; i < 7; ++i) + ; + + { + // expected-error@+2 {{expected 
statement}} + #pragma omp fuse + } + + // expected-warning@+1 {{extra tokens at the end of '#pragma omp fuse' are ignored}} + #pragma omp fuse foo + { + for (int i = 0; i < 7; ++i) + ; + for(int j = 0; j < 100; ++j); + + } + + + // expected-error@+1 {{unexpected OpenMP clause 'final' in directive '#pragma omp fuse'}} + #pragma omp fuse final(0) + { + for (int i = 0; i < 7; ++i) + ; + for(int j = 0; j < 100; ++j); + + } + + //expected-error@+3 {{increment clause of OpenMP for loop must perform simple addition or subtraction on loop variable 'i'}} + #pragma omp fuse + { + for(int i = 0; i < 10; i*=2) { + ; + } + for(int j = 0; j < 100; ++j); + } + + //expected-error@+2 {{loop sequence after '#pragma omp fuse' must contain at least 1 canonical loop or loop-generating construct}} + #pragma omp fuse + {} + + //expected-error@+3 {{statement after '#pragma omp fuse' must be a for loop}} + #pragma omp fuse + { + #pragma omp unroll full + for(int i = 0; i < 10; ++i); + + for(int j = 0; j < 10; ++j); + } + + //expected-warning@+2 {{looprange clause selects a single loop, resulting in redundant fusion}} + #pragma omp fuse + { + for(int i = 0; i < 10; ++i); + } + + //expected-warning@+1 {{looprange clause selects a single loop, resulting in redundant fusion}} + #pragma omp fuse looprange(1, 1) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + } + + //expected-error@+1 {{argument to 'looprange' clause must be a strictly positive integer value}} + #pragma omp fuse looprange(1, -1) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + } + + //expected-error@+1 {{argument to 'looprange' clause must be a strictly positive integer value}} + #pragma omp fuse looprange(1, 0) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + } + + const int x = 1; + constexpr int y = 4; + //expected-error@+1 {{looprange clause selects loops from 1 to 4 but this exceeds the number of loops (3) in the loop sequence}} + #pragma omp fuse looprange(x,y) + 
{ + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } + + //expected-error@+1 {{looprange clause selects loops from 1 to 420 but this exceeds the number of loops (3) in the loop sequence}} + #pragma omp fuse looprange(1,420) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } + + //expected-error@+1 {{looprange clause selects loops from 1 to 6 but this exceeds the number of loops (5) in the loop sequence}} + #pragma omp fuse looprange(1,6) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + // This fusion results in 2 loops + #pragma omp fuse looprange(1,2) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } + } + + //expected-error@+1 {{looprange clause selects loops from 2 to 4 but this exceeds the number of loops (3) in the loop sequence}} + #pragma omp fuse looprange(2,3) + { + #pragma omp unroll partial(2) + for(int i = 0; i < 10; ++i); + + #pragma omp reverse + for(int j = 0; j < 10; ++j); + + #pragma omp fuse + { + { + #pragma omp reverse + for(int j = 0; j < 10; ++j); + } + for(int k = 0; k < 50; ++k); + } + } +} + +// In a template context, but expression itself not instantiation-dependent +template +static void templated_func() { + + //expected-warning@+1 {{looprange clause selects a single loop, resulting in redundant fusion}} + #pragma omp fuse looprange(2,1) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } + + //expected-error@+1 {{looprange clause selects loops from 3 to 5 but this exceeds the number of loops (3) in the loop sequence}} + #pragma omp fuse looprange(3,3) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } + +} + +template +static void templated_func_value_dependent() { + + //expected-warning@+1 {{looprange clause selects a single loop, resulting 
in redundant fusion}} + #pragma omp fuse looprange(V,1) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } +} + +template +static void templated_func_type_dependent() { + constexpr T s = 1; + + //expected-error@+1 {{argument to 'looprange' clause must be a strictly positive integer value}} + #pragma omp fuse looprange(s,s-1) + { + for(int i = 0; i < 10; ++i); + for(int j = 0; j < 100; ++j); + for(int k = 0; k < 50; ++k); + } +} + + +void template_inst() { + // expected-note@+1 {{in instantiation of function template specialization 'templated_func' requested here}} + templated_func(); + // expected-note@+1 {{in instantiation of function template specialization 'templated_func_value_dependent<1>' requested here}} + templated_func_value_dependent<1>(); + // expected-note@+1 {{in instantiation of function template specialization 'templated_func_type_dependent' requested here}} + templated_func_type_dependent(); +} + + diff --git a/clang/test/OpenMP/nvptx_parallel_num_threads_strict_messages.cpp b/clang/test/OpenMP/nvptx_parallel_num_threads_strict_messages.cpp new file mode 100644 index 0000000000000..a1a29fee5a69f --- /dev/null +++ b/clang/test/OpenMP/nvptx_parallel_num_threads_strict_messages.cpp @@ -0,0 +1,108 @@ +// RUN: %clang_cc1 -DF1 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF1 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null +// RUN: %clang_cc1 -DF2 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF2 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda 
-emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null +// RUN: %clang_cc1 -DF3 -verify -fopenmp -fopenmp-version=60 -triple x86_64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host-ppc.bc +// RUN: %clang_cc1 -DF3 -DTARGET -verify -fopenmp -fopenmp-version=60 -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host-ppc.bc -o /dev/null + +#ifndef TARGET +// expected-no-diagnostics +#endif + +#ifdef F3 +template +tx ftemplate(int n) { + tx a = 0; + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: tx(20)) severity(fatal) message("msg") + { + } + + short b = 1; +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: b) severity(warning) message("msg") + { + a += b; + } + + return a; +} +#endif + +#ifdef F2 +static +int fstatic(int n) { + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp target parallel num_threads(strict: n) message("msg") + { + } + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp target parallel num_threads(strict: 32+n) severity(warning) + { + } + + return n+1; +} +#endif + +#ifdef F1 +struct S1 { + double a; + + int r1(int n){ + int b = 1; + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: n-b) severity(warning) message("msg") 
+ { + this->a = (double)b + 1.5; + } + +#ifdef TARGET + // expected-warning@+2 {{modifier 'strict' is currently not supported on a GPU for the 'num_threads' clause; modifier ignored}} +#endif + #pragma omp parallel num_threads(strict: 1024) severity(fatal) + { + this->a = 2.5; + } + + return (int)a; + } +}; +#endif + +int bar(int n){ + int a = 0; + +#ifdef F1 + #pragma omp target + { + S1 S; + a += S.r1(n); + } +#endif + +#ifdef F2 + a += fstatic(n); +#endif + +#ifdef F3 + #pragma omp target + a += ftemplate(n); +#endif + + return a; +} diff --git a/clang/test/OpenMP/target_parallel_num_threads_strict_codegen.cpp b/clang/test/OpenMP/target_parallel_num_threads_strict_codegen.cpp new file mode 100644 index 0000000000000..84e4b909d3f6a --- /dev/null +++ b/clang/test/OpenMP/target_parallel_num_threads_strict_codegen.cpp @@ -0,0 +1,1828 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ +// Test host codegen. 
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1 +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1 +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3 +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3 + +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: 
%clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" + +// Test target codegen - host bc file has to be created first. +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK9 +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10 +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc +// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK11 +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch 
-fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s +// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12 + +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc +// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch 
-fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s +// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" + +// expected-no-diagnostics + +#ifndef HEADER +#define HEADER + +template +tx ftemplate(int n) { + tx a = 0; + + #pragma omp parallel num_threads(strict: tx(20)) severity(fatal) message("msg") + { + } + + short b = 1; + #pragma omp parallel num_threads(strict: b) severity(warning) message("msg") + { + a += b; + } + + return a; +} + +static +int fstatic(int n) { + + #pragma omp target parallel num_threads(strict: n) message("msg") + { + } + + #pragma omp target parallel num_threads(strict: 32+n) severity(warning) + { + } + + return n+1; +} + +struct S1 { + double a; + + int r1(int n){ + int b = 1; + + #pragma omp parallel num_threads(strict: n-b) severity(warning) message("msg") + { + this->a = (double)b + 1.5; + } + + #pragma omp parallel num_threads(strict: 1024) severity(fatal) + { + this->a = 2.5; + } + + return (int)a; + } +}; + +int bar(int n){ + int a = 0; + + #pragma omp target + { + S1 S; + a += S.r1(n); + } + + a += fstatic(n); + + #pragma omp target + a += ftemplate(n); + + return a; +} + +#endif +// CHECK1-LABEL: define {{[^@]+}}@_Z3bari +// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: 
[[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK1-NEXT: [[A_CASTED1:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[N_CASTED2:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[N_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK1-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP12]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 2, ptr [[TMP13]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP10]], ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP11]], ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 0, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP22]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP23]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP24]], align 4 +// CHECK1-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 +// CHECK1-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95(i64 [[TMP1]], i64 [[TMP3]]) #[[ATTR2:[0-9]+]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP27]]) +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP28]], [[CALL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: store i32 [[TMP29]], ptr [[A_CASTED1]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[A_CASTED1]], align 8 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP31]], ptr [[N_CASTED2]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i64, ptr 
[[N_CASTED2]], align 8 +// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP30]], ptr [[TMP33]], align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP30]], ptr [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP35]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 1 +// CHECK1-NEXT: store i64 [[TMP32]], ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 1 +// CHECK1-NEXT: store i64 [[TMP32]], ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP38]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP41]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 1 +// CHECK1-NEXT: store i32 2, ptr [[TMP42]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP39]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP40]], ptr [[TMP44]], align 8 +// 
CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 8 +// CHECK1-NEXT: store i64 0, ptr [[TMP49]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP50]], align 8 +// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP51]], align 4 +// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP52]], align 4 +// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP53]], align 4 +// CHECK1-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103.region_id, ptr [[KERNEL_ARGS6]]) +// CHECK1-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// 
CHECK1-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] +// CHECK1: omp_offload.failed7: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103(i64 [[TMP30]], i64 [[TMP32]]) #[[ATTR2]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT8]] +// CHECK1: omp_offload.cont8: +// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: ret i32 [[TMP56]] +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR1:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 +// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP0]]) +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 
+// CHECK1-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.3, ptr [[THIS1]]) +// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 8 +// CHECK1-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK1-NEXT: ret i32 [[CONV]] +// +// +// CHECK1-LABEL: define {{[^@]+}}@_ZL7fstatici +// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 +// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED3:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x ptr], align 8 +// 
CHECK1-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [1 x ptr], align 8 +// CHECK1-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_1]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META11:![0-9]+]] +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META11]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = 
getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP13]], 0 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 2, ptr [[TMP16]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP11]], ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP12]], ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.4, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// 
CHECK1-NEXT: store i64 0, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP25]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] [[TMP14]], ptr [[TMP26]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP13]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61(i64 [[TMP3]], ptr [[TMP4]]) #[[ATTR2]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP30]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 8 +// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK1-NEXT: store i64 
[[TMP32]], ptr [[TMP33]], align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP32]], ptr [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP35]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP39:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP38]], 0 +// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0 +// CHECK1-NEXT: store i32 3, ptr [[TMP40]], align 4 +// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP41]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP36]], ptr [[TMP42]], align 8 +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP37]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.6, ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.7, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS7]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8 +// CHECK1-NEXT: store i64 0, ptr [[TMP48]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9 +// CHECK1-NEXT: store i64 0, ptr [[TMP49]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10 +// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP50]], align 4 +// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11 +// CHECK1-NEXT: store [3 x i32] [[TMP39]], ptr [[TMP51]], align 4 +// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12 +// CHECK1-NEXT: store i32 0, ptr [[TMP52]], align 4 +// CHECK1-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP38]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.region_id, ptr [[KERNEL_ARGS7]]) +// CHECK1-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK1-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]] +// CHECK1: omp_offload.failed8: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65(i64 [[TMP32]]) #[[ATTR2]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT9]] +// CHECK1: omp_offload.cont9: +// CHECK1-NEXT: [[TMP55:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP55]], 1 +// CHECK1-NEXT: ret i32 [[ADD10]] +// +// +// 
CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP0]]) +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK1-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.8, ptr [[A]], ptr [[B]]) +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: ret i32 [[TMP3]] +// +// +// CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META11]], !align [[META12:![0-9]+]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK1-NEXT: store double [[ADD]], ptr [[A]], align 8 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.3 +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca 
ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK1-NEXT: store double 2.500000e+00, ptr [[A]], align 8 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META11]] +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META11]] +// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK1-SAME: (i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.8 +// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META11]], !align [[META12]] +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META11]], !align [[META15:![0-9]+]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK1-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_Z3bari +// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK3-NEXT: [[A_CASTED1:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[N_CASTED2:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: store i32 [[TMP0]], ptr 
[[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 2, ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: 
store ptr [[TMP10]], ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP11]], ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP17]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 0, ptr [[TMP20]], align 8 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP21]], align 8 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP23]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = call 
i32 @__tgt_target_kernel(ptr @[[GLOB1:[0-9]+]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 +// CHECK3-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3: omp_offload.failed: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95(i32 [[TMP1]], i32 [[TMP3]]) #[[ATTR2:[0-9]+]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK3: omp_offload.cont: +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP27]]) +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP28]], [[CALL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: store i32 [[TMP29]], ptr [[A_CASTED1]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[A_CASTED1]], align 4 +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[TMP31]], ptr [[N_CASTED2]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[N_CASTED2]], align 4 +// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP30]], ptr [[TMP33]], align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP30]], ptr [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP35]], align 4 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 1 +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[TMP36]], align 4 +// CHECK3-NEXT: [[TMP37:%.*]] = 
getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 1 +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[TMP37]], align 4 +// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP38]], align 4 +// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP41]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 1 +// CHECK3-NEXT: store i32 2, ptr [[TMP42]], align 4 +// CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP39]], ptr [[TMP43]], align 4 +// CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP40]], ptr [[TMP44]], align 4 +// CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.1, ptr [[TMP45]], align 4 +// CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP46]], align 4 +// CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP47]], align 4 +// CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 7 +// CHECK3-NEXT: 
store ptr null, ptr [[TMP48]], align 4 +// CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 8 +// CHECK3-NEXT: store i64 0, ptr [[TMP49]], align 8 +// CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP50]], align 8 +// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP51]], align 4 +// CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP52]], align 4 +// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS6]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP53]], align 4 +// CHECK3-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 -1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103.region_id, ptr [[KERNEL_ARGS6]]) +// CHECK3-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK3-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] +// CHECK3: omp_offload.failed7: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103(i32 [[TMP30]], i32 [[TMP32]]) #[[ATTR2]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT8]] +// CHECK3: omp_offload.cont8: +// CHECK3-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: ret i32 [[TMP56]] +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR1:[0-9]+]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 
4 +// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 +// CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP0]]) +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.3, ptr [[THIS1]]) +// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 4 +// CHECK3-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK3-NEXT: ret i32 [[CONV]] +// +// +// CHECK3-LABEL: define {{[^@]+}}@_ZL7fstatici +// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 4 +// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED3:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [1 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [1 x ptr], align 4 +// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [1 x ptr], align 4 +// CHECK3-NEXT: [[KERNEL_ARGS7:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 4, !nonnull [[META12:![0-9]+]] +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] 
= load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META12]] +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP13]], 0 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 2, 
ptr [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP11]], ptr [[TMP17]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP12]], ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.4, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP20]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 0, ptr [[TMP23]], align 8 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP24]], align 8 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] [[TMP14]], ptr [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds 
nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP27]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP13]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 +// CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3: omp_offload.failed: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61(i32 [[TMP3]], ptr [[TMP4]]) #[[ATTR2]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK3: omp_offload.cont: +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 32, [[TMP30]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 +// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[TMP33]], align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP35]], align 4 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP39:%.*]] = 
insertvalue [3 x i32] zeroinitializer, i32 [[TMP38]], 0 +// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 0 +// CHECK3-NEXT: store i32 3, ptr [[TMP40]], align 4 +// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP41]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP36]], ptr [[TMP42]], align 4 +// CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP37]], ptr [[TMP43]], align 4 +// CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.6, ptr [[TMP44]], align 4 +// CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.7, ptr [[TMP45]], align 4 +// CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP46]], align 4 +// CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP47]], align 4 +// CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 8 +// CHECK3-NEXT: store i64 0, ptr [[TMP48]], align 8 +// CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 9 +// CHECK3-NEXT: store i64 0, ptr [[TMP49]], align 8 +// CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds 
nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 10 +// CHECK3-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP50]], align 4 +// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 11 +// CHECK3-NEXT: store [3 x i32] [[TMP39]], ptr [[TMP51]], align 4 +// CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS7]], i32 0, i32 12 +// CHECK3-NEXT: store i32 0, ptr [[TMP52]], align 4 +// CHECK3-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB1]], i64 -1, i32 1, i32 [[TMP38]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.region_id, ptr [[KERNEL_ARGS7]]) +// CHECK3-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK3-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED8:%.*]], label [[OMP_OFFLOAD_CONT9:%.*]] +// CHECK3: omp_offload.failed8: +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65(i32 [[TMP32]]) #[[ATTR2]] +// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT9]] +// CHECK3: omp_offload.cont9: +// CHECK3-NEXT: [[TMP55:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP55]], 1 +// CHECK3-NEXT: ret i32 [[ADD10]] +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP0]]) +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// 
CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK3-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.8, ptr [[A]], ptr [[B]]) +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: ret i32 [[TMP3]] +// +// +// CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META12]], !align [[META13:![0-9]+]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK3-NEXT: store double [[ADD]], ptr [[A]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.3 +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca 
ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK3-NEXT: store double 2.500000e+00, ptr [[A]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK3-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META12]] +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META12]] +// CHECK3-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK3-SAME: (i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.8 +// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] { +// CHECK3-NEXT: entry: +// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 4 +// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META12]], !align [[META13]] +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META12]], !align [[META16:![0-9]+]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK3-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[TMP:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]) +// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META12:![0-9]+]] +// CHECK9-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META12]] +// CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x 
i8], ptr [[TMP3]], i64 0, i64 0 +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 +// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP0]]) +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK9-SAME: (ptr noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR2:[0-9]+]] comdat 
{ +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.1, ptr [[THIS1]]) +// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK9-NEXT: ret i32 [[CONV]] +// +// +// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK9-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP0]]) +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR2]] comdat { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK9-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK9-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK9-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK9-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.2, ptr [[A]], ptr [[B]]) +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: ret i32 [[TMP3]] +// +// +// CHECK9-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META15:![0-9]+]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK9-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK9-NEXT: store double [[ADD]], ptr [[A]], align 8 +// CHECK9-NEXT: ret void 
+// +// +// CHECK9-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.1 +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK9-NEXT: store double 2.500000e+00, ptr [[A]], align 8 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: ret void +// +// +// CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.2 +// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] { +// CHECK9-NEXT: entry: +// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: 
[[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META12]], !align [[META15]] +// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META16:![0-9]+]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK9-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK10-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 +// CHECK10-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP0]]) +// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK10-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK10-NEXT: ret void +// +// +// 
CHECK10-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK10-SAME: (ptr noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR1:[0-9]+]] comdat { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]) +// CHECK10-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK10-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.1, ptr [[THIS1]]) +// CHECK10-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 8 +// CHECK10-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK10-NEXT: ret i32 [[CONV]] +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK10-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP0]]) +// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK10-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK10-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR1]] comdat { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK10-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK10-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK10-NEXT: call void 
(ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK10-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK10-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK10-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.2, ptr [[A]], ptr [[B]]) +// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK10-NEXT: ret i32 [[TMP3]] +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK10-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[TMP:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK10-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 +// CHECK10-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META12:![0-9]+]] +// CHECK10-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 8 +// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK10-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META12]] +// CHECK10-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i64 0, i64 0 +// CHECK10-NEXT: call 
void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK10-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK10-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8 +// CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 +// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK10-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK10-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META15:![0-9]+]] +// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK10-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK10-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK10-NEXT: [[A:%.*]] = getelementptr 
inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK10-NEXT: store double [[ADD]], ptr [[A]], align 8 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.1 +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK10-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK10-NEXT: store double 2.500000e+00, ptr [[A]], align 8 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: ret void +// +// +// CHECK10-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.2 +// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] { +// CHECK10-NEXT: entry: +// CHECK10-NEXT: 
[[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8 +// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 +// CHECK10-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 +// CHECK10-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nonnull [[META12]], !align [[META15]] +// CHECK10-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nonnull [[META12]], !align [[META16:![0-9]+]] +// CHECK10-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK10-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK10-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK10-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[TMP:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]) +// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// 
CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META13:![0-9]+]] +// CHECK11-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META13]] +// CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i32 0, i32 0 +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, 
ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 +// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP0]]) +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 
4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK11-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR2:[0-9]+]] comdat align 2 { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.1, ptr [[THIS1]]) +// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 4 +// CHECK11-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK11-NEXT: ret i32 [[CONV]] +// +// +// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK11-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP0]]) +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR2]] comdat { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK11-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK11-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK11-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.2, ptr [[A]], ptr [[B]]) +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: ret i32 [[TMP3]] +// +// +// CHECK11-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META13]], !align [[META16:![0-9]+]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK11-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK11-NEXT: store double [[ADD]], ptr [[A]], align 4 
+// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.1 +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK11-NEXT: store double 2.500000e+00, ptr [[A]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK11-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.2 +// CHECK11-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] { +// CHECK11-NEXT: entry: +// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: 
[[A_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META13]], !align [[META16]] +// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META13]], !align [[META17:![0-9]+]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK11-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l95 +// CHECK12-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 +// CHECK12-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP0]]) +// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK12-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 
4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_ZN2S12r1Ei +// CHECK12-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR1:[0-9]+]] comdat align 2 { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]) +// CHECK12-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: store i32 1, ptr [[B]], align 4 +// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[B]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]] +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[SUB]], i32 1, ptr @.str) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_ZN2S12r1Ei.omp_outlined, ptr [[THIS1]], ptr [[B]]) +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 1024, i32 2, ptr null) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @_ZN2S12r1Ei.omp_outlined.1, ptr [[THIS1]]) +// CHECK12-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP3:%.*]] = load double, ptr [[A]], align 4 +// CHECK12-NEXT: [[CONV:%.*]] = fptosi double [[TMP3]] to i32 +// CHECK12-NEXT: ret i32 [[CONV]] +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3bari_l103 +// CHECK12-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP0]]) +// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] +// CHECK12-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i +// CHECK12-SAME: (i32 noundef [[N:%.*]]) #[[ATTR1]] comdat { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[B:%.*]] = alloca i16, align 2 +// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 +// CHECK12-NEXT: store i32 0, ptr [[A]], align 4 +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 20, i32 2, ptr @.str) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @_Z9ftemplateIiET_i.omp_outlined) +// CHECK12-NEXT: store i16 1, ptr [[B]], align 2 +// CHECK12-NEXT: [[TMP1:%.*]] = load i16, ptr [[B]], align 2 +// CHECK12-NEXT: [[TMP2:%.*]] = sext i16 [[TMP1]] to i32 +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 1, ptr @.str) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @_Z9ftemplateIiET_i.omp_outlined.2, ptr [[A]], ptr [[B]]) +// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK12-NEXT: ret i32 [[TMP3]] +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61 +// CHECK12-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[TMP:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK12-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK12-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !nonnull [[META13:![0-9]+]] +// CHECK12-NEXT: store ptr [[TMP1]], ptr [[TMP]], align 4 +// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK12-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 4, !nonnull [[META13]] +// CHECK12-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP3]], i32 0, i32 0 +// CHECK12-NEXT: call void 
@__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP2]], i32 2, ptr [[ARRAYDECAY]]) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined) +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l61.omp_outlined +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65 +// CHECK12-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) +// CHECK12-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4 +// CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK12-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 1, ptr null) +// CHECK12-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined) +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l65.omp_outlined +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META13]], !align [[META16:![0-9]+]] +// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK12-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to double +// CHECK12-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 +// CHECK12-NEXT: [[A:%.*]] = getelementptr 
inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK12-NEXT: store double [[ADD]], ptr [[A]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_ZN2S12r1Ei.omp_outlined.1 +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef [[THIS:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK12-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK12-NEXT: store double 2.500000e+00, ptr [[A]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: ret void +// +// +// CHECK12-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i.omp_outlined.2 +// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] { +// CHECK12-NEXT: entry: +// CHECK12-NEXT: 
[[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4 +// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 +// CHECK12-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 +// CHECK12-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 +// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nonnull [[META13]], !align [[META16]] +// CHECK12-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nonnull [[META13]], !align [[META17:![0-9]+]] +// CHECK12-NEXT: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2 +// CHECK12-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 +// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[CONV]] +// CHECK12-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 +// CHECK12-NEXT: ret void +// diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_num_threads_strict_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_num_threads_strict_codegen.cpp deleted file mode 100644 index 02b712679ad19..0000000000000 --- a/clang/test/OpenMP/teams_distribute_parallel_for_num_threads_strict_codegen.cpp +++ /dev/null @@ -1,1447 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ -// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1 -// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 
-fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1 - -// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" -// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}" - -// expected-no-diagnostics -#ifndef HEADER -#define HEADER - -typedef __INTPTR_TYPE__ intptr_t; - - -void foo(); - -struct S { - intptr_t a, b, c; - S(intptr_t a) : a(a) {} - operator char() { return a; } - ~S() {} -}; - -template -int tmain() { - char str[] = "msg1"; -#pragma omp target -#pragma omp teams distribute parallel for num_threads(strict: C) severity(fatal) message("msg") - for (int i = 0; i < 100; i++) - foo(); -#pragma omp target -#pragma omp teams distribute parallel for num_threads(strict: T(23)) severity(warning) message(str) - for (int i = 0; i < 100; i++) - foo(); - return 0; -} - -int main() { - S s(0); - char a = s; - char str[] = "msg2"; -#pragma omp target -#pragma omp teams distribute parallel for num_threads(strict: 2) severity(warning) message("msg") - for (int i = 0; i < 100; i++) { - foo(); - } -#pragma omp target - -#pragma omp 
teams distribute parallel for num_threads(strict: a) severity(fatal) message(str) - for (int i = 0; i < 100; i++) { - foo(); - } - return a + tmain() + tmain(); -} - -#endif -// CHECK1-LABEL: define {{[^@]+}}@main -// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const.main.str, i64 5, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr 
[[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// 
CHECK1-NEXT: store [3 x i32] [i32 2, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 2, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44() #[[ATTR5:[0-9]+]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: lpad: -// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: cleanup -// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR5]] -// CHECK1-NEXT: br label [[EH_RESUME:%.*]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: 
store ptr null, ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP28:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP28]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP30:%.*]] = zext i8 [[TMP29]] to i32 -// CHECK1-NEXT: [[TMP31:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP30]], 0 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP32]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 2, ptr [[TMP33]], align 4 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP26]], ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: 
store ptr @.offload_sizes, ptr [[TMP36]], align 8 -// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP40]], align 8 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP42]], align 4 -// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [[TMP31]], ptr [[TMP43]], align 4 -// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP44]], align 4 -// CHECK1-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP30]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0 -// CHECK1-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49(i64 [[TMP19]], ptr [[STR]]) #[[ATTR5]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: [[TMP47:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP47]] to i32 -// CHECK1-NEXT: [[CALL6:%.*]] = invoke noundef signext i32 @_Z5tmainIcLi5EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont5: -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK1-NEXT: [[CALL8:%.*]] = invoke noundef signext i32 @_Z5tmainI1SLi1EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont7: -// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR5]] -// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: ret i32 [[TMP48]] -// CHECK1: eh.resume: -// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr 
[[A_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK1-NEXT: ret i8 [[CONV]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44 -// CHECK1-SAME: () #[[ATTR4:[0-9]+]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15:![0-9]+]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr 
@[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 2, i32 1, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 
[[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 
-// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8:[0-9]+]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR7:[0-9]+]] comdat { -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR5]] -// CHECK1-NEXT: call void @_ZSt9terminatev() #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49 -// CHECK1-SAME: (i64 noundef 
[[A:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[A_ADDR]], align 1 -// CHECK1-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined, i64 [[TMP3]], ptr [[TMP4]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], 
align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 -// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP10]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]], i32 2, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load 
i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined, i64 [[TMP12]], i64 [[TMP14]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: 
[[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// 
CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK1-SAME: () #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = 
alloca [5 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 5, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 5, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 5, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29() #[[ATTR5]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// 
CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 23, i32 0, i32 0], ptr [[TMP31]], align 4 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 23, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33(ptr [[STR]]) #[[ATTR5]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK1-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [5 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// 
CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 5, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29() #[[ATTR5]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] 
= getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR5]] -// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP21:%.*]] = zext i8 [[TMP20]] to i32 -// CHECK1-NEXT: [[TMP22:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP21]], 0 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP23]], align 4 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP24]], align 4 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [[TMP22]], ptr [[TMP34]], align 4 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP35]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP21]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0 -// CHECK1-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33(ptr [[STR]]) #[[ATTR5]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] 
-// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP38:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP39:%.*]] = extractvalue { ptr, i32 } [[TMP38]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP39]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR5]] -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29 -// CHECK1-SAME: () #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], 
align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: 
[[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 5, i32 2, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call 
void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr 
[[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: 
[[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33 -// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr 
[[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined, ptr [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr 
[[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, i32 1, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load 
i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 
-// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr 
[[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29 -// CHECK1-SAME: () 
#[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr 
[[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i32 2, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load 
i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 
-// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr 
[[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33 -// CHECK1-SAME: (ptr 
noundef nonnull align 1 dereferenceable(5) [[STR:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR5]] -// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_1]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined, i64 [[TMP2]], ptr [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP4:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP4]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP5]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(5) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR4]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, 
!nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 -// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP10:%.*]] = 
load ptr, ptr [[TMP]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [5 x i8], ptr [[TMP10]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]], i32 1, ptr [[ARRAYDECAY]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP12]], i64 [[TMP14]]) -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR4]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: 
[[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// 
CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP11:%.*]] = landingpad { ptr, i32 } -// 
CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP12:%.*]] = extractvalue { ptr, i32 } [[TMP11]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP12]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: ret void -// diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_strict_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_strict_codegen.cpp deleted file mode 100644 index 559cfeef49080..0000000000000 --- a/clang/test/OpenMP/teams_distribute_parallel_for_simd_num_threads_strict_codegen.cpp +++ /dev/null @@ -1,1911 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ -// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK1 -// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1 - -// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 
-fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s --check-prefix=CHECK3 -// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -emit-pch -o %t %s -// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=60 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple powerpc64le-ibm-linux-gnu -fexceptions -fcxx-exceptions -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3 - -// expected-no-diagnostics -#ifndef HEADER -#define HEADER - -typedef __INTPTR_TYPE__ intptr_t; - - -void foo(); - -struct S { - intptr_t a, b, c; - S(intptr_t a) : a(a) {} - operator char() { return a; } - ~S() {} -}; - -template -int tmain() { - char str[] = "msg"; -#pragma omp target -#pragma omp teams distribute parallel for simd num_threads(strict: C) severity(warning) message("msg") - for (int i = 0; i < 100; i++) - foo(); -#pragma omp target -#pragma omp teams distribute parallel for simd num_threads(strict: T(23)) severity(fatal) message(str) - for (int i = 0; i < 100; i++) - foo(); - return 0; -} - -int main() { - S s(0); - char a = s; - const char *str = "msg"; -#pragma omp target -#pragma omp teams distribute parallel for simd num_threads(strict: 2) severity(fatal) message("msg") - for (int i = 0; i < 100; i++) { - foo(); - } -#pragma omp target - -#pragma omp teams distribute parallel for simd num_threads(strict: a) severity(warning) message(str) - for (int i = 0; i < 100; i++) { - foo(); - } - return a + tmain() + tmain(); -} - -#endif -// CHECK1-LABEL: define {{[^@]+}}@main -// CHECK1-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[A:%.*]] = alloca i8, align 
1 -// CHECK1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK1-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK1-NEXT: store ptr @.str, ptr [[STR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr 
[[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 2, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 2, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], 
label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44() #[[ATTR4:[0-9]+]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: lpad: -// CHECK1-NEXT: [[TMP15:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: cleanup -// CHECK1-NEXT: [[TMP16:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 0 -// CHECK1-NEXT: store ptr [[TMP16]], ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = extractvalue { ptr, i32 } [[TMP15]], 1 -// CHECK1-NEXT: store i32 [[TMP17]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR4]] -// CHECK1-NEXT: br label [[EH_RESUME:%.*]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP18]], ptr [[A_CASTED]], align 1 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[STR]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 
-// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP29:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP29]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP31:%.*]] = zext i8 [[TMP30]] to i32 -// CHECK1-NEXT: [[TMP32:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP31]], 0 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP33]], align 4 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 2, ptr [[TMP34]], align 4 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP28]], ptr [[TMP36]], align 8 -// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = 
getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP40]], align 8 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP42]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP43]], align 4 -// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [[TMP32]], ptr [[TMP44]], align 4 -// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP45]], align 4 -// CHECK1-NEXT: [[TMP46:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP31]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP47:%.*]] = icmp ne i32 [[TMP46]], 0 -// CHECK1-NEXT: br i1 [[TMP47]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49(i64 [[TMP19]], ptr [[TMP20]]) #[[ATTR4]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: [[TMP48:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP48]] to i32 -// CHECK1-NEXT: [[CALL6:%.*]] = invoke noundef signext i32 @_Z5tmainIcLi5EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont5: -// 
CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CALL6]] -// CHECK1-NEXT: [[CALL8:%.*]] = invoke noundef signext i32 @_Z5tmainI1SLi1EEiv() -// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD]] -// CHECK1: invoke.cont7: -// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[ADD]], [[CALL8]] -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[RETVAL]], align 4 -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR4]] -// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: ret i32 [[TMP49]] -// CHECK1: eh.resume: -// CHECK1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK1-NEXT: [[LPAD_VAL10:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK1-NEXT: resume { ptr, i32 } [[LPAD_VAL10]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 
-// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK1-NEXT: ret i8 [[CONV]] -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44 -// CHECK1-SAME: () #[[ATTR3:[0-9]+]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15:![0-9]+]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: 
store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 
[[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 2, i32 2, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK1-NEXT: br i1 [[TMP16]], label 
[[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l44.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 
[[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: 
[[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP20]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8:[0-9]+]], !llvm.access.group [[ACC_GRP20]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR6:[0-9]+]] comdat { -// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR4]] -// CHECK1-NEXT: call void 
@_ZSt9terminatev() #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49 -// CHECK1-SAME: (i64 noundef [[A:%.*]], ptr noundef [[STR:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1 -// CHECK1-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined, i64 [[TMP3]], ptr [[TMP4]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr 
[[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] -// CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[TMP8:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP1]], i32 [[TMP8]], i32 1, ptr [[TMP9]]), !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr 
[[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined, i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l49.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 
-// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 
[[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP28]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP28]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK1-SAME: () #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 4, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: 
[[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 5, 
i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 5, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29() #[[ATTR4]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 
-// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP30]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 23, i32 0, i32 0], ptr [[TMP31]], align 4 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP32]], align 4 -// 
CHECK1-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 23, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 -// CHECK1-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33(ptr [[STR]]) #[[ATTR4]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// -// -// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK1-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[KERNEL_ARGS2:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 4, i1 false) -// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = 
getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, 
ptr [[TMP12]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 -// CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] -// CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29() #[[ATTR4]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] -// CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[STR]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR4]] -// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP21:%.*]] = zext i8 [[TMP20]] to i32 -// CHECK1-NEXT: 
[[TMP22:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP21]], 0 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 0 -// CHECK1-NEXT: store i32 3, ptr [[TMP23]], align 4 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP24]], align 4 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 9 -// CHECK1-NEXT: store i64 0, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = 
getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 10 -// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP33]], align 4 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 11 -// CHECK1-NEXT: store [3 x i32] [[TMP22]], ptr [[TMP34]], align 4 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds nuw [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS2]], i32 0, i32 12 -// CHECK1-NEXT: store i32 0, ptr [[TMP35]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 [[TMP21]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.region_id, ptr [[KERNEL_ARGS2]]) -// CHECK1-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0 -// CHECK1-NEXT: br i1 [[TMP37]], label [[OMP_OFFLOAD_FAILED3:%.*]], label [[OMP_OFFLOAD_CONT4:%.*]] -// CHECK1: omp_offload.failed3: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33(ptr [[STR]]) #[[ATTR4]] -// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT4]] -// CHECK1: omp_offload.cont4: -// CHECK1-NEXT: ret i32 0 -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP38:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP39:%.*]] = extractvalue { ptr, i32 } [[TMP38]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP39]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]] -// CHECK1-NEXT: ret void -// -// 
-// CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29 -// CHECK1-SAME: () #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 5, i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr 
[[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l29.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: 
[[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp 
sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP34]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP34]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP34]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33 -// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(4) [[STR:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined, ptr [[TMP1]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 23, i32 2, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr 
[[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIcLi5EEiv_l33.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: 
[[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp 
sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP40]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP40]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP40]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29 -// CHECK1-SAME: () #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined, ptr [[TMP0]]) -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP8]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i32 1, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr 
[[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined, i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 -// CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l29.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: 
[[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp 
sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP46]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP46]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP46]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33 -// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(4) [[STR:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[STR_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: store ptr [[STR]], ptr [[STR_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[STR_ADDR]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: invoke void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: 
[[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR4]] -// CHECK1-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTCAPTURE_EXPR_1]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_1]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined, i64 [[TMP2]], ptr [[TMP3]]) -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP4:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP4]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP5]]) #[[ATTR8]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], ptr noundef nonnull align 1 dereferenceable(4) [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR3]] { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[_TMP3:%.*]] 
= alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8, !nonnull [[META15]] -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr 
[[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[TMP9:%.*]] = sext i8 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP]], align 8, !nonnull [[META15]], !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [4 x i8], ptr [[TMP10]], i64 0, i64 0 -// CHECK1-NEXT: call void @__kmpc_push_num_threads_strict(ptr @[[GLOB3]], i32 [[TMP2]], i32 [[TMP9]], i32 2, ptr [[ARRAYDECAY]]), !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined, i64 [[TMP12]], i64 [[TMP14]]), !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 -// CHECK1-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// -// -// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainI1SLi1EEiv_l33.omp_outlined.omp_outlined -// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR3]] personality ptr @__gxx_personality_v0 { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 -// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 -// 
CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 -// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 -// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 -// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] -// CHECK1: cond.true: -// CHECK1-NEXT: br label [[COND_END:%.*]] -// CHECK1: 
cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: br label [[COND_END]] -// CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] -// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] -// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: invoke void @_Z3foov() -// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP52]] -// CHECK1: invoke.cont: -// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK1: omp.body.continue: -// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]] -// CHECK1: omp.inner.for.end: -// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] -// 
CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] -// CHECK1: .omp.final.then: -// CHECK1-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] -// CHECK1: .omp.final.done: -// CHECK1-NEXT: ret void -// CHECK1: terminate.lpad: -// CHECK1-NEXT: [[TMP13:%.*]] = landingpad { ptr, i32 } -// CHECK1-NEXT: catch ptr null -// CHECK1-NEXT: [[TMP14:%.*]] = extractvalue { ptr, i32 } [[TMP13]], 0 -// CHECK1-NEXT: call void @__clang_call_terminate(ptr [[TMP14]]) #[[ATTR8]], !llvm.access.group [[ACC_GRP52]] -// CHECK1-NEXT: unreachable -// -// -// CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK1-NEXT: entry: -// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@main -// CHECK3-SAME: () #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK3-NEXT: [[A:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[STR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] 
= alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB7:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB8:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV9:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I10:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SC1El(ptr noundef nonnull align 8 dereferenceable(24) [[S]], i64 noundef 0) -// CHECK3-NEXT: [[CALL:%.*]] = invoke noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: store i8 [[CALL]], ptr [[A]], align 1 -// CHECK3-NEXT: store ptr @.str, ptr [[STR]], align 8 -// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META2:![0-9]+]] -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: 
-// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP3]] -// CHECK3: invoke.cont2: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] -// CHECK3: lpad: -// CHECK3-NEXT: [[TMP6:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: cleanup -// CHECK3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 0 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 1 -// CHECK3-NEXT: store i32 [[TMP8]], ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6:[0-9]+]] -// CHECK3-NEXT: br label [[EH_RESUME:%.*]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: store i8 [[TMP9]], ptr [[DOTCAPTURE_EXPR_4]], align 1 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[STR]], align 8 -// CHECK3-NEXT: store ptr [[TMP10]], ptr [[DOTCAPTURE_EXPR_5]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB7]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB8]], align 4 -// CHECK3-NEXT: 
[[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB7]], align 4 -// CHECK3-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV9]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND11:%.*]] -// CHECK3: omp.inner.for.cond11: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]] -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB8]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK3-NEXT: [[CMP12:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END20:%.*]] -// CHECK3: omp.inner.for.body13: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK3-NEXT: [[MUL14:%.*]] = mul nsw i32 [[TMP14]], 1 -// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]] -// CHECK3-NEXT: store i32 [[ADD15]], ptr [[I10]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT16:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP7]] -// CHECK3: invoke.cont16: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE17:%.*]] -// CHECK3: omp.body.continue17: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC18:%.*]] -// CHECK3: omp.inner.for.inc18: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK3-NEXT: [[ADD19:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK3-NEXT: store i32 [[ADD19]], ptr [[DOTOMP_IV9]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND11]], !llvm.loop [[LOOP8:![0-9]+]] -// CHECK3: omp.inner.for.end20: -// CHECK3-NEXT: store i32 100, ptr [[I10]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP16]] to i32 -// CHECK3-NEXT: [[CALL22:%.*]] = invoke noundef signext i32 @_Z5tmainIcLi5EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT21:%.*]] unwind label [[LPAD]] -// 
CHECK3: invoke.cont21: -// CHECK3-NEXT: [[ADD23:%.*]] = add nsw i32 [[CONV]], [[CALL22]] -// CHECK3-NEXT: [[CALL25:%.*]] = invoke noundef signext i32 @_Z5tmainI1SLi1EEiv() -// CHECK3-NEXT: to label [[INVOKE_CONT24:%.*]] unwind label [[LPAD]] -// CHECK3: invoke.cont24: -// CHECK3-NEXT: [[ADD26:%.*]] = add nsw i32 [[ADD23]], [[CALL25]] -// CHECK3-NEXT: store i32 [[ADD26]], ptr [[RETVAL]], align 4 -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[S]]) #[[ATTR6]] -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: ret i32 [[TMP17]] -// CHECK3: eh.resume: -// CHECK3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8 -// CHECK3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4 -// CHECK3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0 -// CHECK3-NEXT: [[LPAD_VAL27:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1 -// CHECK3-NEXT: resume { ptr, i32 } [[LPAD_VAL27]] -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP18:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP19:%.*]] = extractvalue { ptr, i32 } [[TMP18]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP19]]) #[[ATTR7:[0-9]+]], !llvm.access.group [[ACC_GRP3]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1El -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SC2El(ptr noundef nonnull align 8 
dereferenceable(24) [[THIS1]], i64 noundef [[TMP0]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1ScvcEv -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) #[[ATTR2:[0-9]+]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A]], align 8 -// CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i8 -// CHECK3-NEXT: ret i8 [[CONV]] -// -// -// CHECK3-LABEL: define {{[^@]+}}@__clang_call_terminate -// CHECK3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] comdat { -// CHECK3-NEXT: [[TMP2:%.*]] = call ptr @__cxa_begin_catch(ptr [[TMP0]]) #[[ATTR6]] -// CHECK3-NEXT: call void @_ZSt9terminatev() #[[ATTR7]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIcLi5EEiv -// CHECK3-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP4:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP5:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB6:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB7:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV8:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I9:%.*]] = 
alloca i32, align 4 -// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainIcLi5EEiv.str, i64 4, i1 false) -// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META2]] -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP10]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_3]], align 8, !nonnull [[META2]] -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[_TMP4]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB6]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB7]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB6]], align 4 -// CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV8]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] -// CHECK3: omp.inner.for.cond10: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB7]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] -// CHECK3-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]] -// CHECK3: omp.inner.for.body12: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] -// CHECK3-NEXT: store i32 [[ADD14]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP13]] -// CHECK3: invoke.cont15: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] -// CHECK3: omp.body.continue16: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] -// CHECK3: omp.inner.for.inc17: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: [[ADD18:%.*]] = add nsw 
i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV8]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP14:![0-9]+]] -// CHECK3: omp.inner.for.end19: -// CHECK3-NEXT: store i32 100, ptr [[I9]], align 4 -// CHECK3-NEXT: ret i32 0 -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP12:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP13:%.*]] = extractvalue { ptr, i32 } [[TMP12]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP13]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP10]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainI1SLi1EEiv -// CHECK3-SAME: () #[[ATTR2]] comdat personality ptr @__gxx_personality_v0 { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[STR:%.*]] = alloca [4 x i8], align 1 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[TMP:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 8 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP6:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_LB8:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_UB9:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTOMP_IV10:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[I11:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[STR]], ptr align 1 @__const._Z5tmainI1SLi1EEiv.str, i64 4, i1 false) -// CHECK3-NEXT: store ptr @.str, ptr [[DOTCAPTURE_EXPR_]], align 8 -// CHECK3-NEXT: 
[[TMP0:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_]], align 8, !nonnull [[META2]] -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] -// CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]] -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] -// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] -// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[TERMINATE_LPAD:%.*]], !llvm.access.group [[ACC_GRP16]] -// CHECK3: invoke.cont: -// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] -// CHECK3: omp.body.continue: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] -// CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] -// CHECK3: omp.inner.for.end: -// CHECK3-NEXT: store i32 100, ptr [[I]], align 4 -// CHECK3-NEXT: invoke void @_ZN1SC1El(ptr noundef 
nonnull align 8 dereferenceable(24) [[REF_TMP]], i64 noundef 23) -// CHECK3-NEXT: to label [[INVOKE_CONT4:%.*]] unwind label [[TERMINATE_LPAD]] -// CHECK3: invoke.cont4: -// CHECK3-NEXT: [[CALL:%.*]] = call noundef signext i8 @_ZN1ScvcEv(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) -// CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[REF_TMP]]) #[[ATTR6]] -// CHECK3-NEXT: store i8 [[CALL]], ptr [[DOTCAPTURE_EXPR_3]], align 1 -// CHECK3-NEXT: store ptr [[STR]], ptr [[DOTCAPTURE_EXPR_5]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTCAPTURE_EXPR_5]], align 8, !nonnull [[META2]] -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[_TMP6]], align 8 -// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB8]], align 4 -// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB9]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB8]], align 4 -// CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV10]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND12:%.*]] -// CHECK3: omp.inner.for.cond12: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB9]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: [[CMP13:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] -// CHECK3-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY14:%.*]], label [[OMP_INNER_FOR_END21:%.*]] -// CHECK3: omp.inner.for.body14: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: [[MUL15:%.*]] = mul nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 0, [[MUL15]] -// CHECK3-NEXT: store i32 [[ADD16]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: invoke void @_Z3foov() -// CHECK3-NEXT: to label [[INVOKE_CONT17:%.*]] unwind label [[TERMINATE_LPAD]], !llvm.access.group [[ACC_GRP19]] -// CHECK3: invoke.cont17: -// CHECK3-NEXT: br label 
[[OMP_BODY_CONTINUE18:%.*]] -// CHECK3: omp.body.continue18: -// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC19:%.*]] -// CHECK3: omp.inner.for.inc19: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: [[ADD20:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD20]], ptr [[DOTOMP_IV10]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND12]], !llvm.loop [[LOOP20:![0-9]+]] -// CHECK3: omp.inner.for.end21: -// CHECK3-NEXT: store i32 100, ptr [[I11]], align 4 -// CHECK3-NEXT: ret i32 0 -// CHECK3: terminate.lpad: -// CHECK3-NEXT: [[TMP12:%.*]] = landingpad { ptr, i32 } -// CHECK3-NEXT: catch ptr null -// CHECK3-NEXT: [[TMP13:%.*]] = extractvalue { ptr, i32 } [[TMP12]], 0 -// CHECK3-NEXT: call void @__clang_call_terminate(ptr [[TMP13]]) #[[ATTR7]], !llvm.access.group [[ACC_GRP16]] -// CHECK3-NEXT: unreachable -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: call void @_ZN1SD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR6]] -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2El -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i64 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[A2:%.*]] = 
getelementptr inbounds nuw [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: store i64 [[TMP0]], ptr [[A2]], align 8 -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev -// CHECK3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 -// CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: ret void -// diff --git a/clang/test/SemaCXX/ctad.cpp b/clang/test/SemaCXX/ctad.cpp index 8380b564bdcdd..7de7f50337e8c 100644 --- a/clang/test/SemaCXX/ctad.cpp +++ b/clang/test/SemaCXX/ctad.cpp @@ -190,3 +190,10 @@ namespace GH136624 { foo::Alias t = 0; // expected-error@-1 {{no viable conversion from 'int' to 'GH136624::A' (aka 'A')}} } // namespace GH136624 + +namespace GH131342 { + template constexpr int val{0}; + template struct A { A(T) {} }; + template using AA = A>; + AA a{0}; +} // namespace GH131342 diff --git a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp index 1f4d44218ad1f..2f1817d0ca7eb 100644 --- a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp +++ b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp @@ -113,7 +113,7 @@ using Bar = Foo; // expected-note {{candidate template ignored: co // expected-note {{implicit deduction guide declared as 'template requires __is_deducible(test9::Bar, test9::Foo) Bar(test9::Foo) -> test9::Foo'}} \ // expected-note {{implicit deduction guide declared as 'template requires __is_deducible(test9::Bar, test9::Foo) Bar(const X (&)[sizeof(X)]) -> test9::Foo'}} \ // expected-note {{candidate template ignored: constraints not satisfied [with X = int]}} \ - // expected-note {{cannot deduce template arguments for 'test9::Bar' from 'test9::Foo'}} + // expected-note {{cannot deduce 
template arguments for 'test9::Bar' from 'test9::Foo'}} Bar s = {{1}}; // expected-error {{no viable constructor or deduction guide }} diff --git a/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp b/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp index 097ada3caa135..436dfb9aac0a7 100644 --- a/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp +++ b/clang/test/SemaCXX/invalid-requirement-requires-expr.cpp @@ -17,8 +17,7 @@ constexpr bool A::far() { b.data_member; requires A::far(); // #Invalid // expected-error@#Invalid {{recursive template instantiation exceeded maximum depth}} - // expected-note@#Invalid {{in instantiation}} - // expected-note@#Invalid 2 {{while}} + // expected-note@#Invalid 3 {{while}} // expected-note@#Invalid {{contexts in backtrace}} // expected-note@#Invalid {{increase recursive template instantiation depth}} }; diff --git a/clang/test/SemaTemplate/instantiation-depth-subst-2.cpp b/clang/test/SemaTemplate/instantiation-depth-subst-2.cpp index 2b519e974a907..66fd1af0d1429 100644 --- a/clang/test/SemaTemplate/instantiation-depth-subst-2.cpp +++ b/clang/test/SemaTemplate/instantiation-depth-subst-2.cpp @@ -2,5 +2,6 @@ template struct S { }; template S operator+(T, T); // expected-error {{instantiation exceeded maximum depth}} expected-note 2{{while substituting}} +// expected-note@-1 {{use -ftemplate-depth=N to increase recursive template instantiation depth}} S<0> s; int k = s + s; // expected-note {{while substituting}} diff --git a/clang/test/SemaTemplate/instantiation-depth-subst.cpp b/clang/test/SemaTemplate/instantiation-depth-subst.cpp index 062a8ed08bb64..17944bc3aaa40 100644 --- a/clang/test/SemaTemplate/instantiation-depth-subst.cpp +++ b/clang/test/SemaTemplate/instantiation-depth-subst.cpp @@ -3,7 +3,8 @@ // PR9793 template auto f(T t) -> decltype(f(t)); // \ // expected-error {{recursive template instantiation exceeded maximum depth of 2}} \ -// expected-note 2 {{while substituting}} +// expected-note 2 
{{while substituting}} \ +// expected-note {{use -ftemplate-depth=N to increase recursive template instantiation depth}} struct S {}; int k = f(S{}); // expected-note {{while substituting}} diff --git a/clang/test/SemaTemplate/temp_arg_nontype_cxx2c.cpp b/clang/test/SemaTemplate/temp_arg_nontype_cxx2c.cpp index e74c031eba4c1..c4ac36e263bc8 100644 --- a/clang/test/SemaTemplate/temp_arg_nontype_cxx2c.cpp +++ b/clang/test/SemaTemplate/temp_arg_nontype_cxx2c.cpp @@ -123,3 +123,14 @@ Set sf; // expected-note@#C {{evaluated to false}} } // namespace GH84052 + +namespace error_on_type_instantiation { + int f(int) = delete; + // expected-note@-1 {{candidate function has been explicitly deleted}} + template struct X {}; + // expected-error@-1 {{call to deleted function 'f'}} + template void g() { X x; } + // expected-note@-1 {{while substituting prior template arguments into non-type template parameter [with T = int]}} + template void g(); + // expected-note@-1 {{in instantiation of function template specialization}} +} diff --git a/clang/test/SemaTemplate/temp_arg_template_p0522.cpp b/clang/test/SemaTemplate/temp_arg_template_p0522.cpp index d8a81bb363112..60d98a653ff02 100644 --- a/clang/test/SemaTemplate/temp_arg_template_p0522.cpp +++ b/clang/test/SemaTemplate/temp_arg_template_p0522.cpp @@ -83,11 +83,11 @@ namespace DependentType { namespace Auto { template typename T> struct TInt {}; // #TInt template typename T> struct TIntPtr {}; // #TIntPtr - template typename T> struct TAuto {}; + template typename T> struct TAuto {}; // #TAuto template typename T> struct TAutoPtr {}; - template typename T> struct TDecltypeAuto {}; + template typename T> struct TDecltypeAuto {}; // #TDecltypeAuto template struct Auto; - template struct AutoPtr; // #AutoPtr + template struct AutoPtr; template struct DecltypeAuto; template struct Int; template struct IntPtr; @@ -108,7 +108,7 @@ namespace Auto { TIntPtr ipip; TAuto aa; - TAuto aap; // expected-error@#AutoPtr {{could not match 'auto 
*' against 'auto'}} + TAuto aap; // expected-error@#TAuto {{non-type template parameter '' with type 'auto *' has incompatible initializer of type 'auto'}} // expected-note@-1 {{different template parameters}} TAuto ai; // FIXME: ill-formed (?) TAuto aip; // FIXME: ill-formed (?) @@ -130,7 +130,7 @@ namespace Auto { // parameters (such as 'user-defined-type &') that are not valid 'auto' // parameters. TDecltypeAuto daa; - TDecltypeAuto daap; // expected-error@#AutoPtr {{could not match 'auto *' against 'decltype(auto)'}} + TDecltypeAuto daap; // expected-error@#TDecltypeAuto {{non-type template parameter '' with type 'auto *' has incompatible initializer of type 'decltype(auto)'}} // expected-note@-1 {{different template parameters}} int n; diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp index a56e758fb75d8..be658acacb406 100644 --- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp +++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp @@ -717,6 +717,14 @@ wrapDeviceImages(ArrayRef> Buffers, M, BuffersToWrap.front(), offloading::getOffloadEntryArray(M))) return std::move(Err); break; + case OFK_SYCL: { + // TODO: fill these options once the Driver supports them. + offloading::SYCLJITOptions Options; + if (Error Err = + offloading::wrapSYCLBinaries(M, BuffersToWrap.front(), Options)) + return std::move(Err); + break; + } default: return createStringError(getOffloadKindName(Kind) + " wrapping is not supported"); @@ -754,6 +762,32 @@ bundleOpenMP(ArrayRef Images) { return std::move(Buffers); } +Expected>> +bundleSYCL(ArrayRef Images) { + SmallVector> Buffers; + if (DryRun) { + // In dry-run mode there is an empty input which is insufficient for the + // testing. Therefore, we return here a stub image. 
+ OffloadingImage Image; + Image.TheImageKind = IMG_None; + Image.TheOffloadKind = OffloadKind::OFK_SYCL; + Image.StringData["symbols"] = "stub"; + Image.Image = MemoryBuffer::getMemBufferCopy(""); + SmallString<0> SerializedImage = OffloadBinary::write(Image); + Buffers.emplace_back(MemoryBuffer::getMemBufferCopy(SerializedImage)); + return std::move(Buffers); + } + + for (const OffloadingImage &Image : Images) { + // clang-sycl-linker packs outputs into one binary blob. Therefore, it is + // passed to Offload Wrapper as is. + StringRef S(Image.Image->getBufferStart(), Image.Image->getBufferSize()); + Buffers.emplace_back(MemoryBuffer::getMemBufferCopy(S)); + } + + return std::move(Buffers); +} + Expected>> bundleCuda(ArrayRef Images, const ArgList &Args) { SmallVector, 4> InputFiles; @@ -806,8 +840,9 @@ bundleLinkedOutput(ArrayRef Images, const ArgList &Args, llvm::TimeTraceScope TimeScope("Bundle linked output"); switch (Kind) { case OFK_OpenMP: - case OFK_SYCL: return bundleOpenMP(Images); + case OFK_SYCL: + return bundleSYCL(Images); case OFK_Cuda: return bundleCuda(Images, Args); case OFK_HIP: diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp index 5aab74348967d..30e2be758cd39 100644 --- a/clang/tools/libclang/CIndex.cpp +++ b/clang/tools/libclang/CIndex.cpp @@ -2148,6 +2148,9 @@ class EnqueueVisitor : public ConstStmtVisitor, void VisitOMPUnrollDirective(const OMPUnrollDirective *D); void VisitOMPReverseDirective(const OMPReverseDirective *D); void VisitOMPInterchangeDirective(const OMPInterchangeDirective *D); + void VisitOMPCanonicalLoopSequenceTransformationDirective( + const OMPCanonicalLoopSequenceTransformationDirective *D); + void VisitOMPFuseDirective(const OMPFuseDirective *D); void VisitOMPForDirective(const OMPForDirective *D); void VisitOMPForSimdDirective(const OMPForSimdDirective *D); void VisitOMPSectionsDirective(const OMPSectionsDirective *D); @@ -2353,6 +2356,11 @@ void 
OMPClauseEnqueue::VisitOMPPartialClause(const OMPPartialClause *C) { Visitor->AddStmt(C->getFactor()); } +void OMPClauseEnqueue::VisitOMPLoopRangeClause(const OMPLoopRangeClause *C) { + Visitor->AddStmt(C->getFirst()); + Visitor->AddStmt(C->getCount()); +} + void OMPClauseEnqueue::VisitOMPAllocatorClause(const OMPAllocatorClause *C) { Visitor->AddStmt(C->getAllocator()); } @@ -3317,6 +3325,15 @@ void EnqueueVisitor::VisitOMPInterchangeDirective( VisitOMPCanonicalLoopNestTransformationDirective(D); } +void EnqueueVisitor::VisitOMPCanonicalLoopSequenceTransformationDirective( + const OMPCanonicalLoopSequenceTransformationDirective *D) { + VisitOMPExecutableDirective(D); +} + +void EnqueueVisitor::VisitOMPFuseDirective(const OMPFuseDirective *D) { + VisitOMPCanonicalLoopSequenceTransformationDirective(D); +} + void EnqueueVisitor::VisitOMPForDirective(const OMPForDirective *D) { VisitOMPLoopDirective(D); } @@ -6275,6 +6292,8 @@ CXString clang_getCursorKindSpelling(enum CXCursorKind Kind) { return cxstring::createRef("OMPReverseDirective"); case CXCursor_OMPInterchangeDirective: return cxstring::createRef("OMPInterchangeDirective"); + case CXCursor_OMPFuseDirective: + return cxstring::createRef("OMPFuseDirective"); case CXCursor_OMPForDirective: return cxstring::createRef("OMPForDirective"); case CXCursor_OMPForSimdDirective: diff --git a/clang/tools/libclang/CXCursor.cpp b/clang/tools/libclang/CXCursor.cpp index 3c4062410eac1..56f113c1dc309 100644 --- a/clang/tools/libclang/CXCursor.cpp +++ b/clang/tools/libclang/CXCursor.cpp @@ -687,6 +687,9 @@ CXCursor cxcursor::MakeCXCursor(const Stmt *S, const Decl *Parent, case Stmt::OMPInterchangeDirectiveClass: K = CXCursor_OMPInterchangeDirective; break; + case Stmt::OMPFuseDirectiveClass: + K = CXCursor_OMPFuseDirective; + break; case Stmt::OMPForDirectiveClass: K = CXCursor_OMPForDirective; break; diff --git a/clang/unittests/Format/QualifierFixerTest.cpp b/clang/unittests/Format/QualifierFixerTest.cpp index 
f42f2e307f713..58e64ff368946 100644 --- a/clang/unittests/Format/QualifierFixerTest.cpp +++ b/clang/unittests/Format/QualifierFixerTest.cpp @@ -1195,6 +1195,41 @@ TEST_F(QualifierFixerTest, QualifiersBrokenUpByPPDirectives) { Style); } +TEST_F(QualifierFixerTest, QualifierOrderingAfterPreprocessorDirectives) { + auto Style = getLLVMStyle(); + Style.QualifierAlignment = FormatStyle::QAS_Custom; + Style.QualifierOrder = {"static", "inline", "const", "type"}; + + verifyFormat("#if 1\n" + "void foo(const int par);\n" + "const int var1;\n" + "#endif\n" + "\n" + "const int var2;\n" + "const int var3;", + "#if 1\n" + "void foo(int const par);\n" + "int const var1;\n" + "#endif\n" + "\n" + "int const var2;\n" + "int const var3;", + Style); + verifyFormat("#if defined(FOO)\n" + "static const int x = 1;\n" + "#else\n" + "static const int x = 2;\n" + "#endif\n" + "static const int y = 3;", + "#if defined(FOO)\n" + "const static int x = 1;\n" + "#else\n" + "const static int x = 2;\n" + "#endif\n" + "const static int y = 3;", + Style); +} + TEST_F(QualifierFixerTest, UnsignedQualifier) { FormatStyle Style = getLLVMStyle(); diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp index 4c43a963632a6..4a8f27f656f1d 100644 --- a/clang/unittests/Format/TokenAnnotatorTest.cpp +++ b/clang/unittests/Format/TokenAnnotatorTest.cpp @@ -2237,6 +2237,12 @@ TEST_F(TokenAnnotatorTest, UnderstandsLambdas) { ASSERT_EQ(Tokens.size(), 21u) << Tokens; EXPECT_TOKEN(Tokens[11], tok::l_square, TT_LambdaLSquare); EXPECT_TOKEN(Tokens[13], tok::l_brace, TT_LambdaLBrace); + + Tokens = annotate("SomeFunction({[]() -> int *[] { return {}; }});"); + ASSERT_EQ(Tokens.size(), 22u) << Tokens; + EXPECT_TOKEN(Tokens[3], tok::l_square, TT_LambdaLSquare); + EXPECT_TOKEN(Tokens[5], tok::l_paren, TT_LambdaDefinitionLParen); + EXPECT_TOKEN(Tokens[10], tok::l_square, TT_ArraySubscriptLSquare); } TEST_F(TokenAnnotatorTest, UnderstandsFunctionAnnotations) { @@ -4159,7 
+4165,15 @@ TEST_F(TokenAnnotatorTest, LineCommentTrailingBackslash) { EXPECT_TOKEN(Tokens[1], tok::comment, TT_LineComment); } -TEST_F(TokenAnnotatorTest, KeywordedFunctionLikeMacro) { +TEST_F(TokenAnnotatorTest, ArrowAfterSubscript) { + auto Tokens = + annotate("return (getStructType()->getElements())[eIdx]->getName();"); + ASSERT_EQ(Tokens.size(), 19u) << Tokens; + // Not TT_LambdaArrow. + EXPECT_TOKEN(Tokens[13], tok::arrow, TT_Unknown); +} + +TEST_F(TokenAnnotatorTest, QtProperty) { auto Style = getLLVMStyle(); Style.AllowBreakBeforeQtProperty = true; diff --git a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp index bdf328f892063..f52ae9ae8d17c 100644 --- a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp +++ b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp @@ -55,8 +55,10 @@ using namespace __asan; if (LIKELY(replace_intrin_cached)) { \ ASAN_READ_RANGE(ctx, from, size); \ ASAN_WRITE_RANGE(ctx, to, size); \ + } else if (UNLIKELY(!AsanInited())) { \ + return internal_memmove(to, from, size); \ } \ - return internal_memmove(to, from, size); \ + return REAL(memmove)(to, from, size); \ } while (0) void *__asan_memcpy(void *to, const void *from, uptr size) { diff --git a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h index 14727a5d665ed..ec988cff51c59 100644 --- a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h +++ b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h @@ -20,6 +20,7 @@ DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size) DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size) +DECLARE_REAL(void *, memmove, void *to, const void *from, SIZE_T size) namespace __asan { diff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h index 12a75fea24fba..3ea30630e1827 100644 --- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h +++ 
b/compiler-rt/lib/fuzzer/FuzzerCorpus.h @@ -35,7 +35,7 @@ struct InputInfo { size_t Tmp = 0; // Used by ValidateFeatureSet. // Stats. size_t NumExecutedMutations = 0; - size_t NumSuccessfullMutations = 0; + size_t NumSuccessfulMutations = 0; bool NeverReduce = false; bool MayDeleteFile = false; bool Reduced = false; @@ -328,7 +328,7 @@ class InputCorpus { const auto &II = *Inputs[i]; Printf(" [% 3zd %s] sz: % 5zd runs: % 5zd succ: % 5zd focus: %d\n", i, Sha1ToString(II.Sha1).c_str(), II.U.size(), - II.NumExecutedMutations, II.NumSuccessfullMutations, + II.NumExecutedMutations, II.NumSuccessfulMutations, II.HasFocusFunction); } } diff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp index 6b25aa9942d2e..5928d1d96acd1 100644 --- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp @@ -609,7 +609,7 @@ int AnalyzeDictionary(Fuzzer *F, const std::vector &Dict, return 0; } -std::vector ParseSeedInuts(const char *seed_inputs) { +std::vector ParseSeedInputs(const char *seed_inputs) { // Parse -seed_inputs=file1,file2,... 
or -seed_inputs=@seed_inputs_file std::vector Files; if (!seed_inputs) return Files; @@ -919,7 +919,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { exit(0); } - auto CorporaFiles = ReadCorpora(*Inputs, ParseSeedInuts(Flags.seed_inputs)); + auto CorporaFiles = ReadCorpora(*Inputs, ParseSeedInputs(Flags.seed_inputs)); F->Loop(CorporaFiles); if (Flags.verbosity) diff --git a/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp b/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp index 566820ae6d198..cb29af9329bb2 100644 --- a/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWindows.cpp @@ -35,7 +35,7 @@ using namespace fuzzer; #define WIN_SYM_PREFIX #endif -// Declare external functions as having alternativenames, so that we can +// Declare external functions as having alternative names, so that we can // determine if they are not defined. #define EXTERNAL_FUNC(Name, Default) \ __pragma(comment(linker, "/alternatename:" WIN_SYM_PREFIX STRINGIFY( \ diff --git a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp index 4ea1fb1507ca4..a93cd16b87934 100644 --- a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp @@ -666,7 +666,7 @@ void Fuzzer::PrintStatusForNewUnit(const Unit &U, const char *Text) { } void Fuzzer::ReportNewCoverage(InputInfo *II, const Unit &U) { - II->NumSuccessfullMutations++; + II->NumSuccessfulMutations++; MD.RecordSuccessfulMutationSequence(); PrintStatusForNewUnit(U, II->Reduced ? 
"REDUCE" : "NEW "); WriteToOutputCorpus(U); diff --git a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp index 41f11a2e48d47..4587f8616b019 100644 --- a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp +++ b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp @@ -101,7 +101,7 @@ size_t MutationDispatcher::Mutate_CustomCrossOver(uint8_t *Data, size_t Size, if (!NewSize) return 0; - assert(NewSize <= MaxSize && "CustomCrossOver returned overisized unit"); + assert(NewSize <= MaxSize && "CustomCrossOver returned oversized unit"); memcpy(Data, U.data(), NewSize); return NewSize; } @@ -463,7 +463,7 @@ size_t MutationDispatcher::Mutate_CrossOver(uint8_t *Data, size_t Size, default: assert(0); } assert(NewSize > 0 && "CrossOver returned empty unit"); - assert(NewSize <= MaxSize && "CrossOver returned overisized unit"); + assert(NewSize <= MaxSize && "CrossOver returned oversized unit"); return NewSize; } diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp index 5b56b973d55f8..5fdfd1e7c55cc 100644 --- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp @@ -1043,7 +1043,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, StackDepot) { // which covers only simple operations and ensure the configuration is able to // compile. TEST(ScudoCombinedTest, BasicTrustyConfig) { - using AllocatorT = scudo::Allocator; + using AllocatorT = TestAllocator; auto Allocator = std::unique_ptr(new AllocatorT()); for (scudo::uptr ClassId = 1U; @@ -1107,7 +1107,7 @@ struct TestQuarantineConfig { // Verify that the quarantine exists by default. 
TEST(ScudoCombinedTest, QuarantineEnabled) { - using AllocatorT = scudo::Allocator; + using AllocatorT = TestAllocator; auto Allocator = std::unique_ptr(new AllocatorT()); const scudo::uptr Size = 1000U; @@ -1132,7 +1132,7 @@ struct TestQuarantineDisabledConfig : TestQuarantineConfig { }; TEST(ScudoCombinedTest, QuarantineDisabled) { - using AllocatorT = scudo::Allocator; + using AllocatorT = TestAllocator; auto Allocator = std::unique_ptr(new AllocatorT()); const scudo::uptr Size = 1000U; @@ -1154,7 +1154,7 @@ TEST(ScudoCombinedTest, QuarantineDisabled) { // Verify that no special quarantine blocks appear in iterateOverChunks. TEST(ScudoCombinedTest, QuarantineIterateOverChunks) { - using AllocatorT = scudo::Allocator; + using AllocatorT = TestAllocator; auto Allocator = std::unique_ptr(new AllocatorT()); // Do a bunch of allocations and deallocations. At the end there should diff --git a/compiler-rt/test/builtins/Unit/fixunstfdi_test.c b/compiler-rt/test/builtins/Unit/fixunstfdi_test.c index cfe1a87b0a5df..982f3a4629dbd 100644 --- a/compiler-rt/test/builtins/Unit/fixunstfdi_test.c +++ b/compiler-rt/test/builtins/Unit/fixunstfdi_test.c @@ -4,7 +4,7 @@ #include -#if defined(CRT_HAS_TF_MODE) +#if _ARCH_PPC || __aarch64__ || __arm64ec__ #include "int_lib.h" @@ -35,7 +35,7 @@ char assumption_3[sizeof(long double)*CHAR_BIT == 128] = {0}; int main() { -#if defined(CRT_HAS_TF_MODE) +#if _ARCH_PPC || __aarch64__ || __arm64ec__ if (test__fixunstfdi(0.0, 0)) return 1; diff --git a/compiler-rt/test/builtins/Unit/multc3_test.c b/compiler-rt/test/builtins/Unit/multc3_test.c index 7ae4cb5b710a7..e9c99a72be35e 100644 --- a/compiler-rt/test/builtins/Unit/multc3_test.c +++ b/compiler-rt/test/builtins/Unit/multc3_test.c @@ -4,7 +4,7 @@ #include -#if defined(CRT_HAS_128BIT) && defined(CRT_HAS_F128) +#if _ARCH_PPC || __aarch64__ || __arm64ec__ #include "int_lib.h" #include @@ -348,7 +348,7 @@ long double x[][2] = int main() { -#if defined(CRT_HAS_128BIT) && defined(CRT_HAS_F128) +#if 
_ARCH_PPC || __aarch64__ || __arm64ec__ const unsigned N = sizeof(x) / sizeof(x[0]); unsigned i, j; for (i = 0; i < N; ++i) diff --git a/flang-rt/cmake/modules/AddFlangRT.cmake b/flang-rt/cmake/modules/AddFlangRT.cmake index 43b1451d65e32..ab383bcbe2cdf 100644 --- a/flang-rt/cmake/modules/AddFlangRT.cmake +++ b/flang-rt/cmake/modules/AddFlangRT.cmake @@ -251,8 +251,15 @@ function (add_flangrt_library name) $<$:-nogpulib -flto -fvisibility=hidden -Wno-unknown-cuda-version --cuda-feature=+ptx63> ) elseif (APPLE) + # Clang on Darwin enables non-POSIX extensions by default. + # This causes some macros to leak, such as HUGE from , which + # causes some conflicts with Flang symbols (but not with Flang-RT, for + # now). + # It also causes some Flang-RT extensions to be disabled, such as fdate, + # that checks for _POSIX_C_SOURCE. + # Setting _POSIX_C_SOURCE avoids these issues. target_compile_options(${tgtname} PRIVATE - $<$:${DARWIN_osx_BUILTIN_MIN_VER_FLAG}> + $<$:${DARWIN_osx_BUILTIN_MIN_VER_FLAG} -D_POSIX_C_SOURCE=200809> ) endif () diff --git a/flang-rt/lib/cuda/memory.cpp b/flang-rt/lib/cuda/memory.cpp index d830580e6a066..78270fef07c36 100644 --- a/flang-rt/lib/cuda/memory.cpp +++ b/flang-rt/lib/cuda/memory.cpp @@ -25,23 +25,22 @@ extern "C" { void *RTDEF(CUFMemAlloc)( std::size_t bytes, unsigned type, const char *sourceFile, int sourceLine) { void *ptr = nullptr; - if (bytes != 0) { - if (type == kMemTypeDevice) { - if (Fortran::runtime::executionEnvironment.cudaDeviceIsManaged) { - CUDA_REPORT_IF_ERROR( - cudaMallocManaged((void **)&ptr, bytes, cudaMemAttachGlobal)); - } else { - CUDA_REPORT_IF_ERROR(cudaMalloc((void **)&ptr, bytes)); - } - } else if (type == kMemTypeManaged || type == kMemTypeUnified) { + bytes = bytes ? 
bytes : 1; + if (type == kMemTypeDevice) { + if (Fortran::runtime::executionEnvironment.cudaDeviceIsManaged) { CUDA_REPORT_IF_ERROR( cudaMallocManaged((void **)&ptr, bytes, cudaMemAttachGlobal)); - } else if (type == kMemTypePinned) { - CUDA_REPORT_IF_ERROR(cudaMallocHost((void **)&ptr, bytes)); } else { - Terminator terminator{sourceFile, sourceLine}; - terminator.Crash("unsupported memory type"); + CUDA_REPORT_IF_ERROR(cudaMalloc((void **)&ptr, bytes)); } + } else if (type == kMemTypeManaged || type == kMemTypeUnified) { + CUDA_REPORT_IF_ERROR( + cudaMallocManaged((void **)&ptr, bytes, cudaMemAttachGlobal)); + } else if (type == kMemTypePinned) { + CUDA_REPORT_IF_ERROR(cudaMallocHost((void **)&ptr, bytes)); + } else { + Terminator terminator{sourceFile, sourceLine}; + terminator.Crash("unsupported memory type"); } return ptr; } diff --git a/flang-rt/unittests/CMakeLists.txt b/flang-rt/unittests/CMakeLists.txt index fd63ad11dcf43..53cd54dfd215e 100644 --- a/flang-rt/unittests/CMakeLists.txt +++ b/flang-rt/unittests/CMakeLists.txt @@ -78,6 +78,15 @@ function(add_flangrt_dependent_libs target) instead falls back to builtins from Compiler-RT. Linking with ${tgtname} may result in a linker error.") endif () + elseif (APPLE) + # Clang on Darwin enables non-POSIX extensions by default. + # This causes some macros to leak, such as HUGE from , which + # causes some conflicts with Flang symbols (but not with Flang-RT, for + # now). + # It also causes some Flang-RT extensions to be disabled, such as fdate, + # that checks for _POSIX_C_SOURCE. + # Setting _POSIX_C_SOURCE avoids these issues. 
+ target_compile_options(${target} PRIVATE "-D_POSIX_C_SOURCE=200809") endif () endfunction() diff --git a/flang-rt/unittests/Runtime/CUDA/Memory.cpp b/flang-rt/unittests/Runtime/CUDA/Memory.cpp index f2e17870f7999..c84c54a1376e5 100644 --- a/flang-rt/unittests/Runtime/CUDA/Memory.cpp +++ b/flang-rt/unittests/Runtime/CUDA/Memory.cpp @@ -35,6 +35,12 @@ TEST(MemoryCUFTest, SimpleAllocTramsferFree) { RTNAME(CUFMemFree)((void *)dev, kMemTypeDevice, __FILE__, __LINE__); } +TEST(MemoryCUFTest, AllocZero) { + int *dev = (int *)RTNAME(CUFMemAlloc)(0, kMemTypeDevice, __FILE__, __LINE__); + EXPECT_TRUE(dev != 0); + RTNAME(CUFMemFree)((void *)dev, kMemTypeDevice, __FILE__, __LINE__); +} + static OwningPtr createAllocatable( Fortran::common::TypeCategory tc, int kind, int rank = 1) { return Descriptor::Create(TypeCode{tc, kind}, kind, nullptr, rank, nullptr, diff --git a/flang/examples/FeatureList/FeatureList.cpp b/flang/examples/FeatureList/FeatureList.cpp index 569d2b2307f36..daa012e3eb08b 100644 --- a/flang/examples/FeatureList/FeatureList.cpp +++ b/flang/examples/FeatureList/FeatureList.cpp @@ -451,9 +451,6 @@ struct NodeVisitor { READ_FEATURE(OmpBlockConstruct) READ_FEATURE(OmpClause) READ_FEATURE(OmpClauseList) - READ_FEATURE(OmpDeclareTargetSpecifier) - READ_FEATURE(OmpDeclareTargetWithClause) - READ_FEATURE(OmpDeclareTargetWithList) READ_FEATURE(OmpDefaultClause) READ_FEATURE(OmpDefaultClause::DataSharingAttribute) READ_FEATURE(OmpDefaultmapClause) diff --git a/flang/include/flang/Frontend/CodeGenOptions.h b/flang/include/flang/Frontend/CodeGenOptions.h index df6063cc90340..3dca169d43b39 100644 --- a/flang/include/flang/Frontend/CodeGenOptions.h +++ b/flang/include/flang/Frontend/CodeGenOptions.h @@ -168,6 +168,13 @@ class CodeGenOptions : public CodeGenOptionsBase { /// by -fprofile-sample-use or -fprofile-instr-use. std::string ProfileRemappingFile; + /// The name for the split debug info file used for the DW_AT_[GNU_]dwo_name + /// attribute in the skeleton CU. 
+ std::string SplitDwarfFile; + + /// Output filename for the split debug info, not used in the skeleton CU. + std::string SplitDwarfOutput; + /// Check if Clang profile instrumenation is on. bool hasProfileClangInstr() const { return getProfileInstr() == llvm::driver::ProfileClangInstr; diff --git a/flang/include/flang/Lower/OpenMP/Clauses.h b/flang/include/flang/Lower/OpenMP/Clauses.h index 5267a58c7e7f7..5cd196a7869a2 100644 --- a/flang/include/flang/Lower/OpenMP/Clauses.h +++ b/flang/include/flang/Lower/OpenMP/Clauses.h @@ -243,6 +243,7 @@ using Initializer = tomp::clause::InitializerT; using InReduction = tomp::clause::InReductionT; using IsDevicePtr = tomp::clause::IsDevicePtrT; using Lastprivate = tomp::clause::LastprivateT; +using LoopRange = tomp::clause::LoopRangeT; using Linear = tomp::clause::LinearT; using Link = tomp::clause::LinkT; using Map = tomp::clause::MapT; diff --git a/flang/include/flang/Parser/dump-parse-tree.h b/flang/include/flang/Parser/dump-parse-tree.h index 7540d38baa584..14885293fd5eb 100644 --- a/flang/include/flang/Parser/dump-parse-tree.h +++ b/flang/include/flang/Parser/dump-parse-tree.h @@ -538,9 +538,6 @@ class ParseTreeDumper { NODE_ENUM(OmpCloseModifier, Value) NODE(parser, OmpContainsClause) NODE(parser, OmpContextSelectorSpecification) - NODE(parser, OmpDeclareTargetSpecifier) - NODE(parser, OmpDeclareTargetWithClause) - NODE(parser, OmpDeclareTargetWithList) NODE(parser, OmpDeclareVariantDirective) NODE(parser, OmpDefaultClause) NODE_ENUM(OmpDefaultClause, DataSharingAttribute) @@ -616,6 +613,7 @@ class ParseTreeDumper { NODE_ENUM(OmpLinearModifier, Value) NODE(parser, OmpLocator) NODE(parser, OmpLocatorList) + NODE(parser, OmpLoopRangeClause) NODE(parser, OmpMapClause) NODE(OmpMapClause, Modifier) NODE(parser, OmpMapper) diff --git a/flang/include/flang/Parser/openmp-utils.h b/flang/include/flang/Parser/openmp-utils.h index 4b8fe6a5b49f0..b8f3559097750 100644 --- a/flang/include/flang/Parser/openmp-utils.h +++ 
b/flang/include/flang/Parser/openmp-utils.h @@ -39,11 +39,8 @@ struct ConstructId { } MAKE_CONSTR_ID(OpenMPDeclarativeAllocate, D::OMPD_allocate); -MAKE_CONSTR_ID(OpenMPDeclarativeAssumes, D::OMPD_assumes); MAKE_CONSTR_ID(OpenMPDeclareReductionConstruct, D::OMPD_declare_reduction); -MAKE_CONSTR_ID(OpenMPDeclareTargetConstruct, D::OMPD_declare_target); MAKE_CONSTR_ID(OpenMPExecutableAllocate, D::OMPD_allocate); -MAKE_CONSTR_ID(OpenMPRequiresConstruct, D::OMPD_requires); #undef MAKE_CONSTR_ID @@ -95,11 +92,8 @@ struct DirectiveNameScope { if constexpr (std::is_base_of_v) { return std::get(x.t).DirName(); } else if constexpr (std::is_same_v || - std::is_same_v || std::is_same_v || - std::is_same_v || - std::is_same_v || - std::is_same_v) { + std::is_same_v) { return MakeName(std::get(x.t).source, ConstructId::id); } else { return GetFromTuple( diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h index be30a95763208..325ca9b4a227b 100644 --- a/flang/include/flang/Parser/parse-tree.h +++ b/flang/include/flang/Parser/parse-tree.h @@ -4546,6 +4546,15 @@ struct OmpLinearClause { std::tuple t; }; +// Ref: [6.0:207-208] +// +// loop-range-clause -> +// LOOPRANGE(first, count) // since 6.0 +struct OmpLoopRangeClause { + TUPLE_CLASS_BOILERPLATE(OmpLoopRangeClause); + std::tuple t; +}; + // Ref: [4.5:216-219], [5.0:315-324], [5.1:347-355], [5.2:150-158] // // map-clause -> @@ -4877,8 +4886,8 @@ struct OpenMPUtilityConstruct { // ASSUMES absent-clause | contains-clause | holds-clause | no-openmp-clause | // no-openmp-routines-clause | no-parallelism-clause struct OpenMPDeclarativeAssumes { - TUPLE_CLASS_BOILERPLATE(OpenMPDeclarativeAssumes); - std::tuple t; + WRAPPER_CLASS_BOILERPLATE( + OpenMPDeclarativeAssumes, OmpDirectiveSpecification); CharBlock source; }; @@ -4943,28 +4952,16 @@ struct OmpDeclareVariantDirective { CharBlock source; }; -// 2.10.6 declare-target -> DECLARE TARGET (extended-list) | -// DECLARE TARGET 
[declare-target-clause[ [,] -// declare-target-clause]...] -struct OmpDeclareTargetWithList { - WRAPPER_CLASS_BOILERPLATE(OmpDeclareTargetWithList, OmpObjectList); - CharBlock source; -}; - -struct OmpDeclareTargetWithClause { - WRAPPER_CLASS_BOILERPLATE(OmpDeclareTargetWithClause, OmpClauseList); - CharBlock source; -}; - -struct OmpDeclareTargetSpecifier { - UNION_CLASS_BOILERPLATE(OmpDeclareTargetSpecifier); - std::variant u; -}; - +// Ref: [4.5:110-113], [5.0:180-185], [5.1:210-216], [5.2:206-207], +// [6.0:346-348] +// +// declare-target-directive -> // since 4.5 +// DECLARE_TARGET[(extended-list)] | +// DECLARE_TARGET clause-list struct OpenMPDeclareTargetConstruct { - TUPLE_CLASS_BOILERPLATE(OpenMPDeclareTargetConstruct); + WRAPPER_CLASS_BOILERPLATE( + OpenMPDeclareTargetConstruct, OmpDirectiveSpecification); CharBlock source; - std::tuple t; }; // OMP v5.2: 5.8.8 @@ -5003,9 +5000,8 @@ struct OpenMPGroupprivate { // 2.4 requires -> REQUIRES requires-clause[ [ [,] requires-clause]...] 
struct OpenMPRequiresConstruct { - TUPLE_CLASS_BOILERPLATE(OpenMPRequiresConstruct); + WRAPPER_CLASS_BOILERPLATE(OpenMPRequiresConstruct, OmpDirectiveSpecification); CharBlock source; - std::tuple t; }; // 2.15.2 threadprivate -> THREADPRIVATE (variable-name-list) diff --git a/flang/lib/Frontend/CompilerInvocation.cpp b/flang/lib/Frontend/CompilerInvocation.cpp index 09b51730d6216..81610edee36fb 100644 --- a/flang/lib/Frontend/CompilerInvocation.cpp +++ b/flang/lib/Frontend/CompilerInvocation.cpp @@ -160,6 +160,12 @@ static bool parseDebugArgs(Fortran::frontend::CodeGenOptions &opts, opts.DwarfVersion = getLastArgIntValue(args, clang::driver::options::OPT_dwarf_version_EQ, /*Default=*/0, diags); + if (const llvm::opt::Arg *a = + args.getLastArg(clang::driver::options::OPT_split_dwarf_file)) + opts.SplitDwarfFile = a->getValue(); + if (const llvm::opt::Arg *a = + args.getLastArg(clang::driver::options::OPT_split_dwarf_output)) + opts.SplitDwarfOutput = a->getValue(); } return true; } diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp index 51d1e4b547fce..d5e0325911adf 100644 --- a/flang/lib/Frontend/FrontendActions.cpp +++ b/flang/lib/Frontend/FrontendActions.cpp @@ -898,7 +898,19 @@ static void generateMachineCodeOrAssemblyImpl(clang::DiagnosticsEngine &diags, llvm::CodeGenFileType cgft = (act == BackendActionTy::Backend_EmitAssembly) ? llvm::CodeGenFileType::AssemblyFile : llvm::CodeGenFileType::ObjectFile; - if (tm.addPassesToEmitFile(codeGenPasses, os, nullptr, cgft)) { + std::unique_ptr dwoOS; + if (!codeGenOpts.SplitDwarfOutput.empty()) { + std::error_code ec; + dwoOS = std::make_unique(codeGenOpts.SplitDwarfOutput, + ec, llvm::sys::fs::OF_None); + if (ec) { + diags.Report(clang::diag::err_fe_unable_to_open_output) + << codeGenOpts.SplitDwarfOutput << ec.message(); + return; + } + } + if (tm.addPassesToEmitFile(codeGenPasses, os, dwoOS ? 
&dwoOS->os() : nullptr, + cgft)) { unsigned diagID = diags.getCustomDiagID(clang::DiagnosticsEngine::Error, "emission of this file type is not supported"); @@ -909,6 +921,9 @@ static void generateMachineCodeOrAssemblyImpl(clang::DiagnosticsEngine &diags, // Run the passes codeGenPasses.run(llvmModule); + if (dwoOS) + dwoOS->keep(); + // Cleanup delete tlii; } @@ -1322,6 +1337,7 @@ void CodeGenAction::executeAction() { llvm::TargetMachine &targetMachine = ci.getTargetMachine(); targetMachine.Options.MCOptions.AsmVerbose = targetOpts.asmVerbose; + targetMachine.Options.MCOptions.SplitDwarfFile = codeGenOpts.SplitDwarfFile; const llvm::Triple &theTriple = targetMachine.getTargetTriple(); diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp index 4a5b9885bb7c4..149e51b501a82 100644 --- a/flang/lib/Lower/Bridge.cpp +++ b/flang/lib/Lower/Bridge.cpp @@ -2544,7 +2544,7 @@ class FirConverter : public Fortran::lower::AbstractConverter { auto loopOp = fir::DoLoopOp::create( *builder, loc, lowerValue, upperValue, stepValue, /*unordered=*/false, - /*finalCountValue=*/true, + /*finalCountValue=*/false, builder->createConvert(loc, loopVarType, lowerValue)); info.loopOp = loopOp; builder->setInsertionPointToStart(loopOp.getBody()); @@ -2696,22 +2696,18 @@ class FirConverter : public Fortran::lower::AbstractConverter { // Decrement tripVariable. auto doLoopOp = mlir::cast(info.loopOp); builder->setInsertionPointToEnd(doLoopOp.getBody()); - llvm::SmallVector results; - results.push_back(mlir::arith::AddIOp::create( - *builder, loc, doLoopOp.getInductionVar(), doLoopOp.getStep(), - iofAttr)); // Step loopVariable to help optimizations such as vectorization. // Induction variable elimination will clean up as necessary. 
mlir::Value step = builder->createConvert( loc, info.getLoopVariableType(), doLoopOp.getStep()); mlir::Value loopVar = fir::LoadOp::create(*builder, loc, info.loopVariable); - results.push_back( - mlir::arith::AddIOp::create(*builder, loc, loopVar, step, iofAttr)); - fir::ResultOp::create(*builder, loc, results); + mlir::Value loopVarInc = + mlir::arith::AddIOp::create(*builder, loc, loopVar, step, iofAttr); + fir::ResultOp::create(*builder, loc, loopVarInc); builder->setInsertionPointAfter(doLoopOp); // The loop control variable may be used after the loop. - fir::StoreOp::create(*builder, loc, doLoopOp.getResult(1), + fir::StoreOp::create(*builder, loc, doLoopOp.getResult(0), info.loopVariable); continue; } diff --git a/flang/lib/Lower/IO.cpp b/flang/lib/Lower/IO.cpp index 4ad2ac01334fa..98dc78f625b9e 100644 --- a/flang/lib/Lower/IO.cpp +++ b/flang/lib/Lower/IO.cpp @@ -977,9 +977,9 @@ static void genIoLoop(Fortran::lower::AbstractConverter &converter, fir::StoreOp::create(builder, loc, lcv, loopVar); genItemList(ioImpliedDo); builder.setInsertionPointToEnd(doLoopOp.getBody()); - mlir::Value result = mlir::arith::AddIOp::create( - builder, loc, doLoopOp.getInductionVar(), doLoopOp.getStep(), iofAttr); - fir::ResultOp::create(builder, loc, result); + // fir.do_loop's induction variable's increment is implied, + // so we do not need to increment it explicitly. + fir::ResultOp::create(builder, loc, doLoopOp.getInductionVar()); builder.setInsertionPointAfter(doLoopOp); // The loop control variable may be used after the loop. 
lcv = builder.createConvert(loc, fir::unwrapRefType(loopVar.getType()), diff --git a/flang/lib/Lower/OpenMP/Clauses.cpp b/flang/lib/Lower/OpenMP/Clauses.cpp index 48b90ccea2f2a..fac37a372caaf 100644 --- a/flang/lib/Lower/OpenMP/Clauses.cpp +++ b/flang/lib/Lower/OpenMP/Clauses.cpp @@ -1036,6 +1036,11 @@ Link make(const parser::OmpClause::Link &inp, return Link{/*List=*/makeObjects(inp.v, semaCtx)}; } +LoopRange make(const parser::OmpClause::Looprange &inp, + semantics::SemanticsContext &semaCtx) { + llvm_unreachable("Unimplemented: looprange"); +} + Map make(const parser::OmpClause::Map &inp, semantics::SemanticsContext &semaCtx) { // inp.v -> parser::OmpMapClause diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp index d2e865b3e1d0c..1cb3335abbd06 100644 --- a/flang/lib/Lower/OpenMP/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP/OpenMP.cpp @@ -761,19 +761,17 @@ static void promoteNonCPtrUseDevicePtrArgsToUseDeviceAddr( static void getDeclareTargetInfo( lower::AbstractConverter &converter, semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval, - const parser::OpenMPDeclareTargetConstruct &declareTargetConstruct, + const parser::OpenMPDeclareTargetConstruct &construct, mlir::omp::DeclareTargetOperands &clauseOps, llvm::SmallVectorImpl &symbolAndClause) { - const auto &spec = - std::get(declareTargetConstruct.t); - if (const auto *objectList{parser::Unwrap(spec.u)}) { - ObjectList objects{makeObjects(*objectList, semaCtx)}; + + if (!construct.v.Arguments().v.empty()) { + ObjectList objects{makeObjects(construct.v.Arguments(), semaCtx)}; // Case: declare target(func, var1, var2) gatherFuncAndVarSyms(objects, mlir::omp::DeclareTargetCaptureClause::to, symbolAndClause, /*automap=*/false); - } else if (const auto *clauseList{ - parser::Unwrap(spec.u)}) { - List clauses = makeClauses(*clauseList, semaCtx); + } else { + List clauses = makeClauses(construct.v.Clauses(), semaCtx); if (clauses.empty()) { 
Fortran::lower::pft::FunctionLikeUnit *owningProc = eval.getOwningProcedure(); diff --git a/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp b/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp index af96c0be6fae9..bc6592dd72078 100644 --- a/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp +++ b/flang/lib/Optimizer/Transforms/AddDebugInfo.cpp @@ -694,7 +694,9 @@ void AddDebugInfoPass::runOnOperation() { mlir::LLVM::DICompileUnitAttr cuAttr = mlir::LLVM::DICompileUnitAttr::get( mlir::DistinctAttr::create(mlir::UnitAttr::get(context)), llvm::dwarf::getLanguage("DW_LANG_Fortran95"), fileAttr, producer, - isOptimized, debugLevel); + isOptimized, debugLevel, + /*nameTableKind=*/mlir::LLVM::DINameTableKind::Default, + /*splitDebugFilename=*/mlir::StringAttr()); module.walk([&](mlir::func::FuncOp funcOp) { handleFuncOp(funcOp, fileAttr, cuAttr, typeGen, &symbolTable); diff --git a/flang/lib/Parser/openmp-parsers.cpp b/flang/lib/Parser/openmp-parsers.cpp index 0085576292ff5..9507021057476 100644 --- a/flang/lib/Parser/openmp-parsers.cpp +++ b/flang/lib/Parser/openmp-parsers.cpp @@ -1023,6 +1023,9 @@ TYPE_PARSER( maybe(":"_tok >> nonemptyList(Parser{})), /*PostModified=*/pure(true))) +TYPE_PARSER(construct( + scalarIntConstantExpr, "," >> scalarIntConstantExpr)) + // OpenMPv5.2 12.5.2 detach-clause -> DETACH (event-handle) TYPE_PARSER(construct(Parser{})) @@ -1207,6 +1210,8 @@ TYPE_PARSER( // parenthesized(Parser{}))) || "LINK" >> construct(construct( parenthesized(Parser{}))) || + "LOOPRANGE" >> construct(construct( + parenthesized(Parser{}))) || "MAP" >> construct(construct( parenthesized(Parser{}))) || "MATCH" >> construct(construct( @@ -1773,23 +1778,11 @@ TYPE_PARSER(sourced(construct( IsDirective(llvm::omp::Directive::OMPD_declare_reduction)) >= Parser{}))) -// declare-target with list -TYPE_PARSER(sourced(construct( - parenthesized(Parser{})))) - -// declare-target with clause -TYPE_PARSER( - sourced(construct(Parser{}))) - -// declare-target-specifier -TYPE_PARSER( - 
construct(Parser{}) || - construct(Parser{})) - // 2.10.6 Declare Target Construct TYPE_PARSER(sourced(construct( - verbatim("DECLARE TARGET"_tok) || verbatim("DECLARE_TARGET"_tok), - Parser{}))) + predicated(Parser{}, + IsDirective(llvm::omp::Directive::OMPD_declare_target)) >= + Parser{}))) static OmpMapperSpecifier ConstructOmpMapperSpecifier( std::optional &&mapperName, TypeSpec &&typeSpec, Name &&varName) { @@ -1848,7 +1841,9 @@ TYPE_PARSER(sourced( // // 2.4 Requires construct TYPE_PARSER(sourced(construct( - verbatim("REQUIRES"_tok), Parser{}))) + predicated(OmpDirectiveNameParser{}, + IsDirective(llvm::omp::Directive::OMPD_requires)) >= + Parser{}))) // 2.15.2 Threadprivate directive TYPE_PARSER(sourced( // @@ -1865,7 +1860,9 @@ TYPE_PARSER( // Assumes Construct TYPE_PARSER(sourced(construct( - verbatim("ASSUMES"_tok), Parser{}))) + predicated(OmpDirectiveNameParser{}, + IsDirective(llvm::omp::Directive::OMPD_assumes)) >= + Parser{}))) // Declarative constructs TYPE_PARSER( diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp index c9774dd137d2b..0511f5bdf7478 100644 --- a/flang/lib/Parser/unparse.cpp +++ b/flang/lib/Parser/unparse.cpp @@ -2345,6 +2345,13 @@ class UnparseVisitor { } } } + void Unparse(const OmpLoopRangeClause &x) { + Word("LOOPRANGE("); + Walk(std::get<0>(x.t)); + Put(", "); + Walk(std::get<1>(x.t)); + Put(")"); + } void Unparse(const OmpReductionClause &x) { using Modifier = OmpReductionClause::Modifier; Walk(std::get>>(x.t), ": "); @@ -2492,9 +2499,6 @@ class UnparseVisitor { void Unparse(const OpenMPCriticalConstruct &x) { Unparse(static_cast(x)); } - void Unparse(const OmpDeclareTargetWithList &x) { - Put("("), Walk(x.v), Put(")"); - } void Unparse(const OmpInitializerProc &x) { Walk(std::get(x.t)); Put("("); @@ -2561,8 +2565,8 @@ class UnparseVisitor { void Unparse(const OpenMPDeclarativeAssumes &x) { BeginOpenMP(); - Word("!$OMP ASSUMES "); - Walk(std::get(x.t)); + Word("!$OMP "); + Walk(x.v); Put("\n"); 
EndOpenMP(); } @@ -2582,8 +2586,8 @@ class UnparseVisitor { } void Unparse(const OpenMPDeclareTargetConstruct &x) { BeginOpenMP(); - Word("!$OMP DECLARE TARGET "); - Walk(std::get(x.t)); + Word("!$OMP "); + Walk(x.v); Put("\n"); EndOpenMP(); } @@ -2597,10 +2601,10 @@ class UnparseVisitor { Put("\n"); EndOpenMP(); } - void Unparse(const OpenMPRequiresConstruct &y) { + void Unparse(const OpenMPRequiresConstruct &x) { BeginOpenMP(); - Word("!$OMP REQUIRES "); - Walk(std::get(y.t)); + Word("!$OMP "); + Walk(x.v); Put("\n"); EndOpenMP(); } diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp index 57bc53cfe8561..db030bbe1f023 100644 --- a/flang/lib/Semantics/check-omp-structure.cpp +++ b/flang/lib/Semantics/check-omp-structure.cpp @@ -620,23 +620,10 @@ template struct DirectiveSpellingVisitor { checker_(GetDirName(x.t).source, Directive::OMPD_allocators); return false; } - bool Pre(const parser::OpenMPDeclarativeAssumes &x) { - checker_(std::get(x.t).source, Directive::OMPD_assumes); - return false; - } - bool Pre(const parser::OpenMPDeclareTargetConstruct &x) { - checker_( - std::get(x.t).source, Directive::OMPD_declare_target); - return false; - } bool Pre(const parser::OpenMPGroupprivate &x) { checker_(x.v.DirName().source, Directive::OMPD_groupprivate); return false; } - bool Pre(const parser::OpenMPRequiresConstruct &x) { - checker_(std::get(x.t).source, Directive::OMPD_requires); - return false; - } bool Pre(const parser::OmpBeginDirective &x) { checker_(x.DirName().source, x.DirId()); return false; @@ -1507,14 +1494,13 @@ void OmpStructureChecker::Leave(const parser::OpenMPDepobjConstruct &x) { } void OmpStructureChecker::Enter(const parser::OpenMPRequiresConstruct &x) { - const auto &dir{std::get(x.t)}; - PushContextAndClauseSets(dir.source, llvm::omp::Directive::OMPD_requires); + const auto &dirName{x.v.DirName()}; + PushContextAndClauseSets(dirName.source, dirName.v); if (visitedAtomicSource_.empty()) { return; 
} - const auto &clauseList{std::get(x.t)}; - for (const parser::OmpClause &clause : clauseList.v) { + for (const parser::OmpClause &clause : x.v.Clauses().v) { llvm::omp::Clause id{clause.Id()}; if (id == llvm::omp::Clause::OMPC_atomic_default_mem_order) { parser::MessageFormattedText txt( @@ -1615,38 +1601,6 @@ void OmpStructureChecker::Enter(const parser::OmpClause::Allocate &x) { } } -void OmpStructureChecker::Enter(const parser::OmpDeclareTargetWithClause &x) { - SetClauseSets(llvm::omp::Directive::OMPD_declare_target); -} - -void OmpStructureChecker::Leave(const parser::OmpDeclareTargetWithClause &x) { - if (x.v.v.size() > 0) { - const parser::OmpClause *enterClause = - FindClause(llvm::omp::Clause::OMPC_enter); - const parser::OmpClause *toClause = FindClause(llvm::omp::Clause::OMPC_to); - const parser::OmpClause *linkClause = - FindClause(llvm::omp::Clause::OMPC_link); - const parser::OmpClause *indirectClause = - FindClause(llvm::omp::Clause::OMPC_indirect); - if (!enterClause && !toClause && !linkClause) { - context_.Say(x.source, - "If the DECLARE TARGET directive has a clause, it must contain at least one ENTER clause or LINK clause"_err_en_US); - } - if (indirectClause && !enterClause) { - context_.Say(x.source, - "The INDIRECT clause cannot be used without the ENTER clause with the DECLARE TARGET directive."_err_en_US); - } - unsigned version{context_.langOptions().OpenMPVersion}; - if (toClause && version >= 52) { - context_.Warn(common::UsageWarning::OpenMPUsage, toClause->source, - "The usage of TO clause on DECLARE TARGET directive has been deprecated. 
Use ENTER clause instead."_warn_en_US); - } - if (indirectClause) { - CheckAllowedClause(llvm::omp::Clause::OMPC_indirect); - } - } -} - void OmpStructureChecker::Enter(const parser::OpenMPDeclareMapperConstruct &x) { const parser::OmpDirectiveName &dirName{x.v.DirName()}; PushContextAndClauseSets(dirName.source, dirName.v); @@ -1698,42 +1652,6 @@ void OmpStructureChecker::Leave( dirContext_.pop_back(); } -void OmpStructureChecker::Enter(const parser::OpenMPDeclareTargetConstruct &x) { - const auto &dir{std::get(x.t)}; - PushContext(dir.source, llvm::omp::Directive::OMPD_declare_target); -} - -void OmpStructureChecker::Enter(const parser::OmpDeclareTargetWithList &x) { - SymbolSourceMap symbols; - GetSymbolsInObjectList(x.v, symbols); - for (auto &[symbol, source] : symbols) { - const GenericDetails *genericDetails = symbol->detailsIf(); - if (genericDetails) { - context_.Say(source, - "The procedure '%s' in DECLARE TARGET construct cannot be a generic name."_err_en_US, - symbol->name()); - genericDetails->specific(); - } - if (IsProcedurePointer(*symbol)) { - context_.Say(source, - "The procedure '%s' in DECLARE TARGET construct cannot be a procedure pointer."_err_en_US, - symbol->name()); - } - const SubprogramDetails *entryDetails = - symbol->detailsIf(); - if (entryDetails && entryDetails->entryScope()) { - context_.Say(source, - "The procedure '%s' in DECLARE TARGET construct cannot be an entry name."_err_en_US, - symbol->name()); - } - if (IsStmtFunction(*symbol)) { - context_.Say(source, - "The procedure '%s' in DECLARE TARGET construct cannot be a statement function."_err_en_US, - symbol->name()); - } - } -} - void OmpStructureChecker::CheckSymbolName( const parser::CharBlock &source, const parser::OmpObject &object) { common::visit( @@ -1766,62 +1684,138 @@ void OmpStructureChecker::CheckSymbolNames( } } +void OmpStructureChecker::Enter(const parser::OpenMPDeclareTargetConstruct &x) { + const parser::OmpDirectiveName &dirName{x.v.DirName()}; + 
PushContext(dirName.source, dirName.v); + + // Check if arguments are extended-list-items. + for (const parser::OmpArgument &arg : x.v.Arguments().v) { + const Symbol *symbol{GetArgumentSymbol(arg)}; + if (!symbol) { + context_.Say(arg.source, + "An argument to the DECLARE TARGET directive should be an extended-list-item"_err_en_US); + continue; + } + const GenericDetails *genericDetails = symbol->detailsIf(); + if (genericDetails) { + context_.Say(arg.source, + "The procedure '%s' in DECLARE TARGET construct cannot be a generic name."_err_en_US, + symbol->name()); + genericDetails->specific(); + } + if (IsProcedurePointer(*symbol)) { + context_.Say(arg.source, + "The procedure '%s' in DECLARE TARGET construct cannot be a procedure pointer."_err_en_US, + symbol->name()); + } + const SubprogramDetails *entryDetails = + symbol->detailsIf(); + if (entryDetails && entryDetails->entryScope()) { + context_.Say(arg.source, + "The procedure '%s' in DECLARE TARGET construct cannot be an entry name."_err_en_US, + symbol->name()); + } + if (IsStmtFunction(*symbol)) { + context_.Say(arg.source, + "The procedure '%s' in DECLARE TARGET construct cannot be a statement function."_err_en_US, + symbol->name()); + } + } + + // Check if there are arguments or clauses, but not both. + if (!x.v.Clauses().v.empty()) { + if (!x.v.Arguments().v.empty()) { + context_.Say(x.source, + "DECLARE TARGET directive can have argument or clauses, but not both"_err_en_US); + } + SetClauseSets(llvm::omp::Directive::OMPD_declare_target); + } +} + void OmpStructureChecker::Leave(const parser::OpenMPDeclareTargetConstruct &x) { - const auto &dir{std::get(x.t)}; - const auto &spec{std::get(x.t)}; + const parser::OmpDirectiveName &dirName{x.v.DirName()}; + // Handle both forms of DECLARE TARGET. // - Extended list: It behaves as if there was an ENTER/TO clause with the // list of objects as argument. It accepts no explicit clauses. // - With clauses. 
- if (const auto *objectList{parser::Unwrap(spec.u)}) { - deviceConstructFound_ = true; - CheckSymbolNames(dir.source, *objectList); - CheckVarIsNotPartOfAnotherVar(dir.source, *objectList); - CheckThreadprivateOrDeclareTargetVar(*objectList); - } else if (const auto *clauseList{ - parser::Unwrap(spec.u)}) { - bool toClauseFound{false}, deviceTypeClauseFound{false}, - enterClauseFound{false}; - for (const auto &clause : clauseList->v) { - common::visit( - common::visitors{ - [&](const parser::OmpClause::To &toClause) { - toClauseFound = true; - auto &objList{std::get(toClause.v.t)}; - CheckSymbolNames(dir.source, objList); - CheckVarIsNotPartOfAnotherVar(dir.source, objList); - CheckThreadprivateOrDeclareTargetVar(objList); - }, - [&](const parser::OmpClause::Link &linkClause) { - CheckSymbolNames(dir.source, linkClause.v); - CheckVarIsNotPartOfAnotherVar(dir.source, linkClause.v); - CheckThreadprivateOrDeclareTargetVar(linkClause.v); - }, - [&](const parser::OmpClause::Enter &enterClause) { - enterClauseFound = true; - auto &objList{std::get(enterClause.v.t)}; - CheckSymbolNames(dir.source, objList); - CheckVarIsNotPartOfAnotherVar(dir.source, objList); - CheckThreadprivateOrDeclareTargetVar(objList); - }, - [&](const parser::OmpClause::DeviceType &deviceTypeClause) { - deviceTypeClauseFound = true; - if (deviceTypeClause.v.v != - parser::OmpDeviceTypeClause::DeviceTypeDescription::Host) { - // Function / subroutine explicitly marked as runnable by the - // target device. 
- deviceConstructFound_ = true; - } - }, - [&](const auto &) {}, - }, - clause.u); + for (const parser::OmpArgument &arg : x.v.Arguments().v) { + if (auto *object{GetArgumentObject(arg)}) { + deviceConstructFound_ = true; + CheckSymbolName(dirName.source, *object); + CheckVarIsNotPartOfAnotherVar(dirName.source, *object); + CheckThreadprivateOrDeclareTargetVar(*object); + } + } - if ((toClauseFound || enterClauseFound) && !deviceTypeClauseFound) { - deviceConstructFound_ = true; - } + if (!x.v.Clauses().v.empty()) { + const parser::OmpClause *enterClause = + FindClause(llvm::omp::Clause::OMPC_enter); + const parser::OmpClause *toClause = FindClause(llvm::omp::Clause::OMPC_to); + const parser::OmpClause *linkClause = + FindClause(llvm::omp::Clause::OMPC_link); + const parser::OmpClause *indirectClause = + FindClause(llvm::omp::Clause::OMPC_indirect); + if (!enterClause && !toClause && !linkClause) { + context_.Say(x.source, + "If the DECLARE TARGET directive has a clause, it must contain at least one ENTER clause or LINK clause"_err_en_US); + } + if (indirectClause && !enterClause) { + context_.Say(x.source, + "The INDIRECT clause cannot be used without the ENTER clause with the DECLARE TARGET directive."_err_en_US); + } + unsigned version{context_.langOptions().OpenMPVersion}; + if (toClause && version >= 52) { + context_.Warn(common::UsageWarning::OpenMPUsage, toClause->source, + "The usage of TO clause on DECLARE TARGET directive has been deprecated. 
Use ENTER clause instead."_warn_en_US); + } + if (indirectClause) { + CheckAllowedClause(llvm::omp::Clause::OMPC_indirect); + } + } + + bool toClauseFound{false}, deviceTypeClauseFound{false}, + enterClauseFound{false}; + for (const parser::OmpClause &clause : x.v.Clauses().v) { + common::visit( + common::visitors{ + [&](const parser::OmpClause::To &toClause) { + toClauseFound = true; + auto &objList{std::get(toClause.v.t)}; + CheckSymbolNames(dirName.source, objList); + CheckVarIsNotPartOfAnotherVar(dirName.source, objList); + CheckThreadprivateOrDeclareTargetVar(objList); + }, + [&](const parser::OmpClause::Link &linkClause) { + CheckSymbolNames(dirName.source, linkClause.v); + CheckVarIsNotPartOfAnotherVar(dirName.source, linkClause.v); + CheckThreadprivateOrDeclareTargetVar(linkClause.v); + }, + [&](const parser::OmpClause::Enter &enterClause) { + enterClauseFound = true; + auto &objList{std::get(enterClause.v.t)}; + CheckSymbolNames(dirName.source, objList); + CheckVarIsNotPartOfAnotherVar(dirName.source, objList); + CheckThreadprivateOrDeclareTargetVar(objList); + }, + [&](const parser::OmpClause::DeviceType &deviceTypeClause) { + deviceTypeClauseFound = true; + if (deviceTypeClause.v.v != + parser::OmpDeviceTypeClause::DeviceTypeDescription::Host) { + // Function / subroutine explicitly marked as runnable by the + // target device. 
+ deviceConstructFound_ = true; + } + }, + [&](const auto &) {}, + }, + clause.u); + + if ((toClauseFound || enterClauseFound) && !deviceTypeClauseFound) { + deviceConstructFound_ = true; } } + dirContext_.pop_back(); } @@ -3112,6 +3106,12 @@ CHECK_REQ_CONSTANT_SCALAR_INT_CLAUSE(Collapse, OMPC_collapse) CHECK_REQ_CONSTANT_SCALAR_INT_CLAUSE(Safelen, OMPC_safelen) CHECK_REQ_CONSTANT_SCALAR_INT_CLAUSE(Simdlen, OMPC_simdlen) +void OmpStructureChecker::Enter(const parser::OmpClause::Looprange &x) { + context_.Say(GetContext().clauseSource, + "LOOPRANGE clause is not implemented yet"_err_en_US, + ContextDirectiveAsFortran()); +} + // Restrictions specific to each clause are implemented apart from the // generalized restrictions. diff --git a/flang/lib/Semantics/check-omp-structure.h b/flang/lib/Semantics/check-omp-structure.h index 176f6568814c5..193784555a887 100644 --- a/flang/lib/Semantics/check-omp-structure.h +++ b/flang/lib/Semantics/check-omp-structure.h @@ -113,9 +113,6 @@ class OmpStructureChecker void Leave(const parser::OpenMPDeclareTargetConstruct &); void Enter(const parser::OpenMPDepobjConstruct &); void Leave(const parser::OpenMPDepobjConstruct &); - void Enter(const parser::OmpDeclareTargetWithList &); - void Enter(const parser::OmpDeclareTargetWithClause &); - void Leave(const parser::OmpDeclareTargetWithClause &); void Enter(const parser::OpenMPDispatchConstruct &); void Leave(const parser::OpenMPDispatchConstruct &); void Enter(const parser::OmpErrorDirective &); diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp index 218e3e7266ca9..a4c8922f58c6c 100644 --- a/flang/lib/Semantics/resolve-directives.cpp +++ b/flang/lib/Semantics/resolve-directives.cpp @@ -523,7 +523,7 @@ class OmpAttributeVisitor : DirectiveAttributeVisitor { // Gather information from the clauses. 
Flags flags; std::optional memOrder; - for (const auto &clause : std::get(x.t).v) { + for (const parser::OmpClause &clause : x.v.Clauses().v) { flags |= common::visit( common::visitors{ [&memOrder]( @@ -2324,22 +2324,17 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPCriticalConstruct &x) { bool OmpAttributeVisitor::Pre(const parser::OpenMPDeclareTargetConstruct &x) { PushContext(x.source, llvm::omp::Directive::OMPD_declare_target); - const auto &spec{std::get(x.t)}; - if (const auto *objectList{parser::Unwrap(spec.u)}) { - ResolveOmpObjectList(*objectList, Symbol::Flag::OmpDeclareTarget); - } else if (const auto *clauseList{ - parser::Unwrap(spec.u)}) { - for (const auto &clause : clauseList->v) { - if (const auto *toClause{std::get_if(&clause.u)}) { - auto &objList{std::get(toClause->v.t)}; - ResolveOmpObjectList(objList, Symbol::Flag::OmpDeclareTarget); - } else if (const auto *linkClause{ - std::get_if(&clause.u)}) { - ResolveOmpObjectList(linkClause->v, Symbol::Flag::OmpDeclareTarget); - } else if (const auto *enterClause{ - std::get_if(&clause.u)}) { - ResolveOmpObjectList(std::get(enterClause->v.t), - Symbol::Flag::OmpDeclareTarget); + + for (const parser::OmpArgument &arg : x.v.Arguments().v) { + if (auto *object{omp::GetArgumentObject(arg)}) { + ResolveOmpObject(*object, Symbol::Flag::OmpDeclareTarget); + } + } + + for (const parser::OmpClause &clause : x.v.Clauses().v) { + if (auto *objects{parser::omp::GetOmpObjectList(clause)}) { + for (const parser::OmpObject &object : objects->v) { + ResolveOmpObject(object, Symbol::Flag::OmpDeclareTarget); } } } diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp index 43b49e01c89c7..2f350f016c1f5 100644 --- a/flang/lib/Semantics/resolve-names.cpp +++ b/flang/lib/Semantics/resolve-names.cpp @@ -24,6 +24,7 @@ #include "flang/Evaluate/intrinsics.h" #include "flang/Evaluate/tools.h" #include "flang/Evaluate/type.h" +#include "flang/Parser/openmp-utils.h" #include 
"flang/Parser/parse-tree-visitor.h" #include "flang/Parser/parse-tree.h" #include "flang/Parser/tools.h" @@ -1572,51 +1573,41 @@ class OmpVisitor : public virtual DeclarationVisitor { } void Post(const parser::OpenMPThreadprivate &) { SkipImplicitTyping(false); } bool Pre(const parser::OpenMPDeclareTargetConstruct &x) { - const auto &spec{std::get(x.t)}; - auto populateDeclareTargetNames{[this](const parser::OmpObjectList - &objectList) { - for (const auto &ompObject : objectList.v) { - common::visit( - common::visitors{ - [&](const parser::Designator &designator) { - if (const auto *name{ - semantics::getDesignatorNameIfDataRef(designator)}) { - specPartState_.declareTargetNames.insert(name->source); - } - }, - [&](const parser::Name &name) { - specPartState_.declareTargetNames.insert(name.source); - }, - [&](const parser::OmpObject::Invalid &invalid) { - switch (invalid.v) { - SWITCH_COVERS_ALL_CASES - case parser::OmpObject::Invalid::Kind::BlankCommonBlock: - context().Say(invalid.source, - "Blank common blocks are not allowed as directive or clause arguments"_err_en_US); - break; - } - }, - }, - ompObject.u); - } + auto addObjectName{[&](const parser::OmpObject &object) { + common::visit( + common::visitors{ + [&](const parser::Designator &designator) { + if (const auto *name{ + semantics::getDesignatorNameIfDataRef(designator)}) { + specPartState_.declareTargetNames.insert(name->source); + } + }, + [&](const parser::Name &name) { + specPartState_.declareTargetNames.insert(name.source); + }, + [&](const parser::OmpObject::Invalid &invalid) { + switch (invalid.v) { + SWITCH_COVERS_ALL_CASES + case parser::OmpObject::Invalid::Kind::BlankCommonBlock: + context().Say(invalid.source, + "Blank common blocks are not allowed as directive or clause arguments"_err_en_US); + break; + } + }, + }, + object.u); }}; - if (const auto *objectList{parser::Unwrap(spec.u)}) { - populateDeclareTargetNames(*objectList); - } else if (const auto *clauseList{ - parser::Unwrap(spec.u)}) 
{ - for (const auto &clause : clauseList->v) { - if (const auto *toClause{ - std::get_if(&clause.u)}) { - populateDeclareTargetNames( - std::get(toClause->v.t)); - } else if (const auto *linkClause{ - std::get_if(&clause.u)}) { - populateDeclareTargetNames(linkClause->v); - } else if (const auto *enterClause{ - std::get_if(&clause.u)}) { - populateDeclareTargetNames( - std::get(enterClause->v.t)); + for (const parser::OmpArgument &arg : x.v.Arguments().v) { + if (auto *object{omp::GetArgumentObject(arg)}) { + addObjectName(*object); + } + } + + for (const parser::OmpClause &clause : x.v.Clauses().v) { + if (auto *objects{parser::omp::GetOmpObjectList(clause)}) { + for (const parser::OmpObject &object : objects->v) { + addObjectName(object); } } } diff --git a/flang/test/Driver/split-debug.f90 b/flang/test/Driver/split-debug.f90 new file mode 100644 index 0000000000000..1cb9f84d7a5d2 --- /dev/null +++ b/flang/test/Driver/split-debug.f90 @@ -0,0 +1,44 @@ +! Test -gsplit-dwarf and -gsplit-dwarf={split,single}. + +! RUN: %flang -### -c -target x86_64 -g -gsplit-dwarf %s 2>&1 | FileCheck %s --check-prefixes=SPLIT +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf -g %s 2>&1 | FileCheck %s --check-prefixes=SPLIT +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf=split -g %s 2>&1 | FileCheck %s --check-prefixes=SPLIT + +! SPLIT: "-split-dwarf-file" "split-debug.dwo" "-split-dwarf-output" "split-debug.dwo" + +! Check warning on non-supported platforms. +! RUN: %flang -### -c -target x86_64-apple-darwin -gsplit-dwarf -g %s 2>&1 | FileCheck %s --check-prefix=WARN +! WARN: warning: debug information option '-gsplit-dwarf' is not supported for target 'x86_64-apple-darwin' + +! -gno-split-dwarf disables debug fission. +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf -g -gno-split-dwarf %s 2>&1 | FileCheck %s --check-prefix=NOSPLIT +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf=single -g -gno-split-dwarf %s 2>&1 | FileCheck %s --check-prefix=NOSPLIT +! 
RUN: %flang -### -c -target x86_64 -gno-split-dwarf -g -gsplit-dwarf %s 2>&1 | FileCheck %s --check-prefixes=SPLIT + +! NOSPLIT-NOT: "-split-dwarf + +! Test -gsplit-dwarf=single. +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf=single -g %s 2>&1 | FileCheck %s --check-prefix=SINGLE + +! SINGLE: "-split-dwarf-file" "split-debug.o" +! SINGLE-NOT: "-split-dwarf-output" + +! RUN: %flang -### -c -target x86_64 -gsplit-dwarf=single -g -o %tfoo.o %s 2>&1 | FileCheck %s --check-prefix=SINGLE_WITH_FILENAME +! SINGLE_WITH_FILENAME: "-split-dwarf-file" "{{.*}}foo.o" +! SINGLE_WITH_FILENAME-NOT: "-split-dwarf-output" + + +! Invoke objcopy if not using the integrated assembler. +! RUN: %flang -### -c -target x86_64-unknown-linux-gnu -fno-integrated-as -gsplit-dwarf -g %s 2>&1 | FileCheck %s --check-prefix=OBJCOPY +! OBJCOPY: objcopy{{(.exe)?}} +! OBJCOPY-SAME: --extract-dwo +! OBJCOPY-NEXT: objcopy{{(.exe)?}} +! OBJCOPY-SAME: --strip-dwo + +! RUN: not %flang -target powerpc-ibm-aix -gdwarf-4 -gsplit-dwarf %s 2>&1 \ +! RUN: | FileCheck %s --check-prefix=UNSUP_OPT_AIX +! RUN: not %flang -target powerpc64-ibm-aix -gdwarf-4 -gsplit-dwarf %s 2>&1 \ +! RUN: | FileCheck %s --check-prefix=UNSUP_OPT_AIX64 + +! UNSUP_OPT_AIX: error: unsupported option '-gsplit-dwarf' for target 'powerpc-ibm-aix' +! 
UNSUP_OPT_AIX64: error: unsupported option '-gsplit-dwarf' for target 'powerpc64-ibm-aix' diff --git a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir index 24e5cad84b709..38d51110bbde3 100644 --- a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir +++ b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir @@ -349,7 +349,7 @@ func.func @_QPopenmp_target_data_region() { %3 = fir.convert %c1024_i32 : (i32) -> index %c1 = arith.constant 1 : index %4 = fir.convert %2 : (index) -> i32 - %5:2 = fir.do_loop %arg0 = %2 to %3 step %c1 iter_args(%arg1 = %4) -> (index, i32) { + %5 = fir.do_loop %arg0 = %2 to %3 step %c1 iter_args(%arg1 = %4) -> (i32) { fir.store %arg1 to %1 : !fir.ref %6 = fir.load %1 : !fir.ref %7 = fir.load %1 : !fir.ref @@ -358,13 +358,12 @@ func.func @_QPopenmp_target_data_region() { %9 = arith.subi %8, %c1_i64 : i64 %10 = fir.coordinate_of %0, %9 : (!fir.ref>, i64) -> !fir.ref fir.store %6 to %10 : !fir.ref - %11 = arith.addi %arg0, %c1 overflow : index %12 = fir.convert %c1 : (index) -> i32 %13 = fir.load %1 : !fir.ref %14 = arith.addi %13, %12 overflow : i32 - fir.result %11, %14 : index, i32 + fir.result %14 : i32 } - fir.store %5#1 to %1 : !fir.ref + fir.store %5 to %1 : !fir.ref omp.terminator } return @@ -404,7 +403,6 @@ func.func @_QPopenmp_target_data_region() { // CHECK: %[[VAL_21:.*]] = llvm.sub %[[VAL_19]], %[[VAL_20]] : i64 // CHECK: %[[VAL_22:.*]] = llvm.getelementptr %[[VAL_1]][0, %[[VAL_21]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK: llvm.store %[[VAL_17]], %[[VAL_22]] : i32, !llvm.ptr -// CHECK: %[[VAL_23:.*]] = llvm.add %[[VAL_12]], %[[VAL_8]] overflow : i64 // CHECK: %[[VAL_24:.*]] = llvm.trunc %[[VAL_8]] : i64 to i32 // CHECK: %[[VAL_25:.*]] = llvm.load %[[VAL_3]] : !llvm.ptr -> i32 // CHECK: %[[VAL_26:.*]] = llvm.add %[[VAL_25]], %[[VAL_24]] overflow : i32 @@ -653,18 +651,17 @@ func.func @_QPsb() { omp.sections { omp.section { %2 = fir.convert %c1 : (index) -> i32 - %3:2 = fir.do_loop 
%arg0 = %c1 to %c10 step %c1 iter_args(%arg1 = %2) -> (index, i32) { + %3 = fir.do_loop %arg0 = %c1 to %c10 step %c1 iter_args(%arg1 = %2) -> (i32) { fir.store %arg1 to %0 : !fir.ref %4 = fir.load %1 : !fir.ref %5 = arith.addi %4, %c1_i32 : i32 fir.store %5 to %1 : !fir.ref - %6 = arith.addi %arg0, %c1 : index %7 = fir.convert %c1 : (index) -> i32 %8 = fir.load %0 : !fir.ref %9 = arith.addi %8, %7 : i32 - fir.result %6, %9 : index, i32 + fir.result %9 : i32 } - fir.store %3#1 to %0 : !fir.ref + fir.store %3 to %0 : !fir.ref omp.terminator } omp.terminator diff --git a/flang/test/Integration/debug-split-dwarf.f90 b/flang/test/Integration/debug-split-dwarf.f90 new file mode 100644 index 0000000000000..60373efddc358 --- /dev/null +++ b/flang/test/Integration/debug-split-dwarf.f90 @@ -0,0 +1,21 @@ +! REQUIRES: x86-registered-target + +! Testing to ensure that setting only -split-dwarf-file allows to place +! .dwo sections into regular output object. +! RUN: %flang_fc1 -debug-info-kind=standalone -triple x86_64-unknown-linux \ +! RUN: -split-dwarf-file %t.o -emit-obj -o %t.o %s +! RUN: llvm-readobj -S %t.o | FileCheck --check-prefix=DWO %s + +! Testing to ensure that setting both -split-dwarf-file and -split-dwarf-output +! does not place .dwo sections into regular output object but in a separate +! file. +! RUN: %flang_fc1 -debug-info-kind=standalone -triple x86_64-unknown-linux \ +! RUN: -split-dwarf-file %t.dwo -split-dwarf-output %t.dwo -emit-obj -o %t.o %s +! RUN: llvm-readobj -S %t.dwo | FileCheck --check-prefix=DWO %s +! RUN: llvm-readobj -S %t.o | FileCheck --check-prefix=SPLIT %s + +! DWO: .dwo +! SPLIT-NOT: .dwo + +program test +end program test diff --git a/flang/test/Lower/OpenACC/acc-declare.f90 b/flang/test/Lower/OpenACC/acc-declare.f90 index edae0e6a4d37e..46c4365f23fd6 100644 --- a/flang/test/Lower/OpenACC/acc-declare.f90 +++ b/flang/test/Lower/OpenACC/acc-declare.f90 @@ -20,7 +20,7 @@ subroutine acc_declare_copy() ! 
CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_copyEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "a"} ! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[COPYIN]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (i32) { ! CHECK: } ! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[COPYIN]] : !fir.ref>) ! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref>) to varPtr(%[[DECL]]#0 : !fir.ref>) {dataClause = #acc, name = "a"} @@ -40,7 +40,7 @@ subroutine acc_declare_create() ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_createEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (i32) { ! CHECK: } ! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[CREATE]] : !fir.ref>) ! CHECK: acc.delete accPtr(%[[CREATE]] : !fir.ref>) {dataClause = #acc, name = "a"} @@ -60,7 +60,7 @@ subroutine acc_declare_present(a) ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_presentEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) ! 
CHECK: %[[PRESENT:.*]] = acc.present varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[PRESENT]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) ! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[PRESENT]] : !fir.ref>) ! CHECK: acc.delete accPtr(%[[PRESENT]] : !fir.ref>) {dataClause = #acc, name = "a"} @@ -81,7 +81,7 @@ subroutine acc_declare_copyin() ! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[ADECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[BDECL]]#0 : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "b"} ! CHECK: acc.declare_enter dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref>, !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) ! CHECK: acc.delete accPtr(%[[COPYIN_A]] : !fir.ref>) {dataClause = #acc, name = "a"} ! CHECK: acc.delete accPtr(%[[COPYIN_B]] : !fir.ref>) {dataClause = #acc, name = "b"} @@ -99,7 +99,7 @@ subroutine acc_declare_copyout() ! CHECK: %[[ADECL:.*]]:2 = hlfir.declare %[[A]](%{{.*}}) {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_copyoutEa"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADECL]]#0 : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "a"} ! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! 
CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) ! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[CREATE]] : !fir.ref>) ! CHECK: acc.copyout accPtr(%[[CREATE]] : !fir.ref>) to varPtr(%[[ADECL]]#0 : !fir.ref>) {name = "a"} ! CHECK: return @@ -118,7 +118,7 @@ subroutine acc_declare_deviceptr(a) ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_deviceptrEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[DEVICEPTR:.*]] = acc.deviceptr varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: acc.declare_enter dataOperands(%[[DEVICEPTR]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) subroutine acc_declare_link(a) integer :: a(100), i @@ -134,7 +134,7 @@ subroutine acc_declare_link(a) ! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_linkEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[LINK:.*]] = acc.declare_link varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: acc.declare_enter dataOperands(%[[LINK]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) subroutine acc_declare_device_resident(a) integer :: a(100), i @@ -150,7 +150,7 @@ subroutine acc_declare_device_resident(a) ! 
CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare, uniq_name = "_QMacc_declareFacc_declare_device_residentEa"} : (!fir.ref>, !fir.shape<1>, !fir.dscope) -> (!fir.ref>, !fir.ref>) ! CHECK: %[[DEVICERES:.*]] = acc.declare_device_resident varPtr(%[[DECL]]#0 : !fir.ref>) -> !fir.ref> {name = "a"} ! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[DEVICERES]] : !fir.ref>) -! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32) +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (i32) ! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[DEVICERES]] : !fir.ref>) ! CHECK: acc.delete accPtr(%[[DEVICERES]] : !fir.ref>) {dataClause = #acc, name = "a"} @@ -279,7 +279,7 @@ subroutine acc_declare_multiple_directive(a, b) ! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECL_A]]#0 : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "a"} ! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECL_B]]#0 : !fir.ref>) -> !fir.ref> {dataClause = #acc, name = "b"} ! CHECK: acc.declare_enter dataOperands(%[[COPYIN]], %[[CREATE]] : !fir.ref>, !fir.ref>) -! CHECK: %{{.*}}:{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { +! CHECK: %{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (i32) { ! 
CHECK: acc.copyout accPtr(%[[CREATE]] : !fir.ref>) to varPtr(%[[DECL_B]]#0 : !fir.ref>) {name = "b"} diff --git a/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90 b/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90 index 0c0d877a17b00..642b11bcd6b75 100644 --- a/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90 +++ b/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90 @@ -22,10 +22,10 @@ subroutine sb1 !CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_ADDR]] {uniq_name = "_QFsb1Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) !CHECK: omp.parallel private({{.*}} %[[I_DECL]]#0 -> %[[I_PVT_ADDR:.*]] : {{.*}}) { !CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_ADDR]] {uniq_name = "_QFsb1Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: %[[I_FINAL_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (index, i32) { +!CHECK: %[[I_FINAL_VAL:.*]] = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (i32) { !CHECK: fir.store %[[I_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: } -!CHECK: fir.store %[[I_FINAL_VAL]]#1 to %[[I_PVT_DECL]]#0 : !fir.ref +!CHECK: fir.store %[[I_FINAL_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: omp.terminator !CHECK: } !CHECK: return @@ -58,20 +58,20 @@ subroutine sb2 !CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_ADDR]] {uniq_name = "_QFsb2Ei"} : (!fir.ref) -> (!fir.ref, !fir.ref) -!CHECK: %[[FINAL_J_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[J_VAL:.*]] = %{{.*}}) -> (index, i32) { +!CHECK: %[[FINAL_J_VAL:.*]] = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[J_VAL:.*]] = %{{.*}}) -> (i32) { !CHECK: fir.store %[[J_VAL]] to %[[J_PVT_DECL]]#0 : !fir.ref !CHECK: fir.if %{{.*}} { -!CHECK: %[[FINAL_I_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (index, i32) { +!CHECK: %[[FINAL_I_VAL:.*]] = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} 
iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (i32) { !CHECK: fir.store %[[I_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: } -!CHECK: fir.store %[[FINAL_I_VAL]]#1 to %[[I_PVT_DECL]]#0 : !fir.ref +!CHECK: fir.store %[[FINAL_I_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: } -!CHECK: %[[FINAL_I_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (index, i32) { +!CHECK: %[[FINAL_I_VAL:.*]] = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (i32) { !CHECK: fir.store %[[I_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: } -!CHECK: fir.store %[[FINAL_I_VAL]]#1 to %[[I_PVT_DECL]]#0 : !fir.ref +!CHECK: fir.store %[[FINAL_I_VAL]] to %[[I_PVT_DECL]]#0 : !fir.ref !CHECK: } -!CHECK: fir.store %[[FINAL_J_VAL]]#1 to %[[J_PVT_DECL]]#0 : !fir.ref +!CHECK: fir.store %[[FINAL_J_VAL]] to %[[J_PVT_DECL]]#0 : !fir.ref !CHECK: omp.terminator !CHECK: } !CHECK: return diff --git a/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 b/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 index 4d1023925fd88..3bb40834afe4c 100644 --- a/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 +++ b/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 @@ -59,21 +59,20 @@ ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_9]] : (i32) -> index ! CHECK: %[[VAL_11:.*]] = arith.constant 1 : index ! CHECK: %[[LB:.*]] = fir.convert %[[VAL_8]] : (index) -> i32 -! CHECK: %[[VAL_12:.*]]:2 = fir.do_loop %[[VAL_13:[^ ]*]] = +! CHECK: %[[VAL_12:.*]] = fir.do_loop %[[VAL_13:[^ ]*]] = ! CHECK-SAME: %[[VAL_8]] to %[[VAL_10]] step %[[VAL_11]] -! CHECK-SAME: iter_args(%[[IV:.*]] = %[[LB]]) -> (index, i32) { +! CHECK-SAME: iter_args(%[[IV:.*]] = %[[LB]]) -> (i32) { ! CHECK: fir.store %[[IV]] to %[[PRIV_J_DECL]]#0 : !fir.ref ! CHECK: %[[LOAD:.*]] = fir.load %[[PRIV_I_DECL]]#0 : !fir.ref ! CHECK: %[[VAL_15:.*]] = fir.load %[[PRIV_J_DECL]]#0 : !fir.ref ! CHECK: %[[VAL_16:.*]] = arith.addi %[[LOAD]], %[[VAL_15]] : i32 ! 
CHECK: hlfir.assign %[[VAL_16]] to %[[PRIV_X_DECL]]#0 : i32, !fir.ref -! CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_13]], %[[VAL_11]] overflow : index ! CHECK: %[[STEPCAST:.*]] = fir.convert %[[VAL_11]] : (index) -> i32 ! CHECK: %[[IVLOAD:.*]] = fir.load %[[PRIV_J_DECL]]#0 : !fir.ref ! CHECK: %[[IVINC:.*]] = arith.addi %[[IVLOAD]], %[[STEPCAST]] overflow : -! CHECK: fir.result %[[VAL_17]], %[[IVINC]] : index, i32 +! CHECK: fir.result %[[IVINC]] : i32 ! CHECK: } -! CHECK: fir.store %[[VAL_12]]#1 to %[[PRIV_J_DECL]]#0 : !fir.ref +! CHECK: fir.store %[[VAL_12]] to %[[PRIV_J_DECL]]#0 : !fir.ref ! CHECK: omp.yield ! CHECK: } ! CHECK: } diff --git a/flang/test/Lower/OpenMP/sections-predetermined-private.f90 b/flang/test/Lower/OpenMP/sections-predetermined-private.f90 index 3ca3b2219c91b..3313feb3d7021 100644 --- a/flang/test/Lower/OpenMP/sections-predetermined-private.f90 +++ b/flang/test/Lower/OpenMP/sections-predetermined-private.f90 @@ -15,15 +15,15 @@ ! CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_3]] {uniq_name = "_QFEi"} : (!fir.ref) -> (!fir.ref, !fir.ref) ! CHECK: omp.sections { ! CHECK: omp.section { -! CHECK: %[[VAL_11:.*]]:2 = fir.do_loop %[[VAL_12:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}} -> (index, i32) { +! CHECK: %[[VAL_11:.*]] = fir.do_loop %[[VAL_12:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}} -> (i32) { ! CHECK: } -! CHECK: fir.store %[[VAL_11]]#1 to %[[VAL_4]]#0 : !fir.ref +! CHECK: fir.store %[[VAL_11]] to %[[VAL_4]]#0 : !fir.ref ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.section { -! CHECK: %[[VAL_25:.*]]:2 = fir.do_loop %[[VAL_26:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) { +! CHECK: %[[VAL_25:.*]] = fir.do_loop %[[VAL_26:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (i32) { ! CHECK: } -! CHECK: fir.store %[[VAL_25]]#1 to %[[VAL_4]]#0 : !fir.ref +! CHECK: fir.store %[[VAL_25]] to %[[VAL_4]]#0 : !fir.ref ! CHECK: omp.terminator ! 
CHECK: } ! CHECK: omp.terminator diff --git a/flang/test/Lower/OpenMP/shared-loop.f90 b/flang/test/Lower/OpenMP/shared-loop.f90 index a26cbd0b9e90e..48ad553752e4a 100644 --- a/flang/test/Lower/OpenMP/shared-loop.f90 +++ b/flang/test/Lower/OpenMP/shared-loop.f90 @@ -9,14 +9,14 @@ ! CHECK: omp.parallel { ! CHECK: omp.sections { ! CHECK: omp.section { -! CHECK: %[[RES:.*]]:2 = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = +! CHECK: %[[RES:.*]] = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = ! CHECK: fir.store %[[ARG1]] to %[[DECL_I]]#0 -! CHECK: %[[UPDATE_ITER:.*]] = arith.addi %[[ARG0]], %{{.*}} +! CHECK: hlfir.assign ! CHECK: %[[LOAD_I:.*]] = fir.load %[[DECL_I]]#0 ! CHECK: %[[RES_I:.*]] = arith.addi %[[LOAD_I]], %{{.*}} -! CHECK: fir.result %[[UPDATE_ITER]], %[[RES_I]] +! CHECK: fir.result %[[RES_I]] ! CHECK: } -! CHECK: fir.store %[[RES]]#1 to %[[DECL_I]]#0 +! CHECK: fir.store %[[RES]] to %[[DECL_I]]#0 ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.terminator @@ -47,15 +47,15 @@ subroutine omploop ! CHECK: %[[DECL_PRIV_I:.*]]:2 = hlfir.declare %[[ALLOC_PRIV_I]] ! CHECK: omp.sections { ! CHECK: omp.section { -! CHECK: %[[RES:.*]]:2 = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = +! CHECK: %[[RES:.*]] = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = ! CHECK-NOT: fir.store %[[ARG1]] to %[[DECL_I]]#1 ! CHECK: fir.store %[[ARG1]] to %[[DECL_PRIV_I]]#0 -! CHECK: %[[UPDATE_ITER:.*]] = arith.addi %[[ARG0]], %{{.*}} +! CHECK: hlfir.assign ! CHECK: %[[LOAD_I:.*]] = fir.load %[[DECL_PRIV_I]]#0 ! CHECK: %[[RES_I:.*]] = arith.addi %[[LOAD_I]], %{{.*}} -! CHECK: fir.result %[[UPDATE_ITER]], %[[RES_I]] +! CHECK: fir.result %[[RES_I]] ! CHECK: } -! CHECK: fir.store %[[RES]]#1 to %[[DECL_PRIV_I]]#0 +! CHECK: fir.store %[[RES]] to %[[DECL_PRIV_I]]#0 ! CHECK: omp.terminator ! CHECK: } ! 
CHECK: omp.terminator @@ -87,15 +87,15 @@ subroutine omploop2 ! CHECK: %[[DECL_PRIV_I:.*]]:2 = hlfir.declare %[[ALLOC_PRIV_I]] ! CHECK: omp.sections { ! CHECK: omp.section { -! CHECK: %[[RES:.*]]:2 = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = +! CHECK: %[[RES:.*]] = fir.do_loop %[[ARG0:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[ARG1:.*]] = ! CHECK-NOT: fir.store %[[ARG1]] to %[[DECL_I]]#1 ! CHECK: fir.store %[[ARG1]] to %[[DECL_PRIV_I]]#0 -! CHECK: %[[UPDATE_ITER:.*]] = arith.addi %[[ARG0]], %{{.*}} +! CHECK: hlfir.assign ! CHECK: %[[LOAD_I:.*]] = fir.load %[[DECL_PRIV_I]]#0 ! CHECK: %[[RES_I:.*]] = arith.addi %[[LOAD_I]], %{{.*}} -! CHECK: fir.result %[[UPDATE_ITER]], %[[RES_I]] +! CHECK: fir.result %[[RES_I]] ! CHECK: } -! CHECK: fir.store %[[RES]]#1 to %[[DECL_PRIV_I]]#0 +! CHECK: fir.store %[[RES]] to %[[DECL_PRIV_I]]#0 ! CHECK: omp.terminator ! CHECK: } ! CHECK: omp.terminator diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 index e2f75bc8e4481..a02188a7fb0db 100644 --- a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 +++ b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable-array-minmax.f90 @@ -204,7 +204,7 @@ program reduce15 ! CHECK: %[[VAL_39:.*]] = fir.convert %[[VAL_38]] : (i32) -> index ! CHECK: %[[VAL_40:.*]] = arith.constant 1 : index ! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_37]] : (index) -> i32 -! CHECK: %[[VAL_42:.*]]:2 = fir.do_loop %[[VAL_43:.*]] = %[[VAL_37]] to %[[VAL_39]] step %[[VAL_40]] iter_args(%[[VAL_44:.*]] = %[[VAL_41]]) -> (index, i32) { +! CHECK: %[[VAL_42:.*]] = fir.do_loop %[[VAL_43:.*]] = %[[VAL_37]] to %[[VAL_39]] step %[[VAL_40]] iter_args(%[[VAL_44:.*]] = %[[VAL_41]]) -> (i32) { ! CHECK: fir.store %[[VAL_44]] to %[[VAL_3]]#0 : !fir.ref ! CHECK: %[[VAL_45:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref ! 
CHECK: %[[VAL_46:.*]] = fir.load %[[VAL_1]]#0 : !fir.ref>>> @@ -212,13 +212,12 @@ program reduce15 ! CHECK: %[[VAL_48:.*]] = fir.convert %[[VAL_47]] : (i32) -> i64 ! CHECK: %[[VAL_49:.*]] = hlfir.designate %[[VAL_46]] (%[[VAL_48]]) : (!fir.box>>, i64) -> !fir.ref ! CHECK: hlfir.assign %[[VAL_45]] to %[[VAL_49]] : i32, !fir.ref -! CHECK: %[[VAL_50:.*]] = arith.addi %[[VAL_43]], %[[VAL_40]] overflow : index ! CHECK: %[[VAL_51:.*]] = fir.convert %[[VAL_40]] : (index) -> i32 ! CHECK: %[[VAL_52:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref ! CHECK: %[[VAL_53:.*]] = arith.addi %[[VAL_52]], %[[VAL_51]] overflow : i32 -! CHECK: fir.result %[[VAL_50]], %[[VAL_53]] : index, i32 +! CHECK: fir.result %[[VAL_53]] : i32 ! CHECK: } -! CHECK: fir.store %[[VAL_54:.*]]#1 to %[[VAL_3]]#0 : !fir.ref +! CHECK: fir.store %[[VAL_54:.*]] to %[[VAL_3]]#0 : !fir.ref ! CHECK: omp.parallel { ! CHECK: %[[VAL_57:.*]] = arith.constant 1 : i32 ! CHECK: %[[VAL_58:.*]] = arith.constant 10 : i32 diff --git a/flang/test/Lower/OpenMP/wsloop-variable.f90 b/flang/test/Lower/OpenMP/wsloop-variable.f90 index 0f4aafb10ded3..60d970f3f0bac 100644 --- a/flang/test/Lower/OpenMP/wsloop-variable.f90 +++ b/flang/test/Lower/OpenMP/wsloop-variable.f90 @@ -139,7 +139,7 @@ subroutine wsloop_variable_sub !CHECK: %[[VAL_33:.*]] = fir.load %[[VAL_15]]#0 : !fir.ref !CHECK: %[[VAL_34:.*]] = fir.convert %[[VAL_33]] : (i32) -> index !CHECK: %[[VAL_35:.*]] = fir.convert %[[VAL_30]] : (index) -> i64 -!CHECK: %[[VAL_36:.*]]:2 = fir.do_loop %[[VAL_37:.*]] = %[[VAL_30]] to %[[VAL_32]] step %[[VAL_34]] iter_args(%[[VAL_38:.*]] = %[[VAL_35]]) -> (index, i64) { +!CHECK: %[[VAL_36:.*]] = fir.do_loop %[[VAL_37:.*]] = %[[VAL_30]] to %[[VAL_32]] step %[[VAL_34]] iter_args(%[[VAL_38:.*]] = %[[VAL_35]]) -> (i64) { !CHECK: fir.store %[[VAL_38]] to %[[VAL_17]]#0 : !fir.ref !CHECK: %[[VAL_39:.*]] = fir.load %[[VAL_3]]#0 : !fir.ref !CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_39]] : (i16) -> i64 @@ -147,13 +147,12 @@ subroutine wsloop_variable_sub 
!CHECK: %[[VAL_42:.*]] = arith.addi %[[VAL_40]], %[[VAL_41]] : i64 !CHECK: %[[VAL_43:.*]] = fir.convert %[[VAL_42]] : (i64) -> f32 !CHECK: hlfir.assign %[[VAL_43]] to %[[VAL_21]]#0 : f32, !fir.ref -!CHECK: %[[VAL_44:.*]] = arith.addi %[[VAL_37]], %[[VAL_34]] overflow : index !CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_34]] : (index) -> i64 !CHECK: %[[VAL_46:.*]] = fir.load %[[VAL_17]]#0 : !fir.ref !CHECK: %[[VAL_47:.*]] = arith.addi %[[VAL_46]], %[[VAL_45]] overflow : i64 -!CHECK: fir.result %[[VAL_44]], %[[VAL_47]] : index, i64 +!CHECK: fir.result %[[VAL_47]] : i64 !CHECK: } -!CHECK: fir.store %[[VAL_48:.*]]#1 to %[[VAL_17]]#0 : !fir.ref +!CHECK: fir.store %[[VAL_48:.*]] to %[[VAL_17]]#0 : !fir.ref !CHECK: omp.yield !CHECK: } !CHECK: } diff --git a/flang/test/Lower/do_loop.f90 b/flang/test/Lower/do_loop.f90 index 5d8343b8d68a4..065324ac200da 100644 --- a/flang/test/Lower/do_loop.f90 +++ b/flang/test/Lower/do_loop.f90 @@ -20,19 +20,18 @@ subroutine simple_loop ! CHECK: %[[C5_CVT:.*]] = fir.convert %c5_i32 : (i32) -> index ! CHECK: %[[C1:.*]] = arith.constant 1 : index ! CHECK: %[[LB:.*]] = fir.convert %[[C1_CVT]] : (index) -> i32 - ! CHECK: %[[LI_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = + ! CHECK: %[[LI_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[C1_CVT]] to %[[C5_CVT]] step %[[C1]] - ! CHECK-SAME: iter_args(%[[IV:.*]] = %[[LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[IV:.*]] = %[[LB]]) -> (i32) { do i=1,5 ! CHECK: fir.store %[[IV]] to %[[I_REF]] : !fir.ref - ! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[C1]] overflow : index ! CHECK: %[[STEPCAST:.*]] = fir.convert %[[C1]] : (index) -> i32 ! CHECK: %[[IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[IVINC:.*]] = arith.addi %[[IVLOAD]], %[[STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[LI_NEXT]], %[[IVINC]] : index, i32 + ! CHECK: fir.result %[[IVINC]] : i32 ! CHECK: } end do - ! CHECK: fir.store %[[LI_RES]]#1 to %[[I_REF]] : !fir.ref + ! 
CHECK: fir.store %[[LI_RES]] to %[[I_REF]] : !fir.ref ! CHECK: %[[I:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %{{.*}} = fir.call @_FortranAioOutputInteger32(%{{.*}}, %[[I]]) {{.*}}: (!fir.ref, i32) -> i1 print *, i @@ -54,9 +53,9 @@ subroutine nested_loop ! CHECK: %[[E_I_CVT:.*]] = fir.convert %[[E_I]] : (i32) -> index ! CHECK: %[[ST_I:.*]] = arith.constant 1 : index ! CHECK: %[[I_LB:.*]] = fir.convert %[[S_I_CVT]] : (index) -> i32 - ! CHECK: %[[I_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = + ! CHECK: %[[I_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[S_I_CVT]] to %[[E_I_CVT]] step %[[ST_I]] - ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i32) { do i=1,5 ! CHECK: fir.store %[[I_IV]] to %[[I_REF]] : !fir.ref ! CHECK: %[[S_J:.*]] = arith.constant 1 : i32 @@ -65,9 +64,9 @@ subroutine nested_loop ! CHECK: %[[E_J_CVT:.*]] = fir.convert %[[E_J]] : (i32) -> index ! CHECK: %[[ST_J:.*]] = arith.constant 1 : index ! CHECK: %[[J_LB:.*]] = fir.convert %[[S_J_CVT]] : (index) -> i32 - ! CHECK: %[[J_RES:.*]]:2 = fir.do_loop %[[LJ:[^ ]*]] = + ! CHECK: %[[J_RES:.*]] = fir.do_loop %[[LJ:[^ ]*]] = ! CHECK-SAME: %[[S_J_CVT]] to %[[E_J_CVT]] step %[[ST_J]] - ! CHECK-SAME: iter_args(%[[J_IV:.*]] = %[[J_LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[J_IV:.*]] = %[[J_LB]]) -> (i32) { do j=1,5 ! CHECK: fir.store %[[J_IV]] to %[[J_REF]] : !fir.ref ! CHECK: %[[ASUM:.*]] = fir.load %[[ASUM_REF]] : !fir.ref @@ -84,22 +83,20 @@ subroutine nested_loop ! CHECK: %[[ASUM_NEW:.*]] = arith.addi %[[ASUM]], %[[ARR_VAL]] : i32 ! CHECK: fir.store %[[ASUM_NEW]] to %[[ASUM_REF]] : !fir.ref asum = asum + arr(i,j) - ! CHECK: %[[LJ_NEXT:.*]] = arith.addi %[[LJ]], %[[ST_J]] overflow : index ! CHECK: %[[J_STEPCAST:.*]] = fir.convert %[[ST_J]] : (index) -> i32 ! CHECK: %[[J_IVLOAD:.*]] = fir.load %[[J_REF]] : !fir.ref ! CHECK: %[[J_IVINC:.*]] = arith.addi %[[J_IVLOAD]], %[[J_STEPCAST]] overflow : i32 - ! 
CHECK: fir.result %[[LJ_NEXT]], %[[J_IVINC]] : index, i32 + ! CHECK: fir.result %[[J_IVINC]] : i32 ! CHECK: } end do - ! CHECK: fir.store %[[J_RES]]#1 to %[[J_REF]] : !fir.ref - ! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[ST_I]] overflow : index + ! CHECK: fir.store %[[J_RES]] to %[[J_REF]] : !fir.ref ! CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[ST_I]] : (index) -> i32 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[LI_NEXT]], %[[I_IVINC]] : index, i32 + ! CHECK: fir.result %[[I_IVINC]] : i32 ! CHECK: } end do - ! CHECK: fir.store %[[I_RES]]#1 to %[[I_REF]] : !fir.ref + ! CHECK: fir.store %[[I_RES]] to %[[I_REF]] : !fir.ref end subroutine ! Test a downcounting loop @@ -115,19 +112,18 @@ subroutine down_counting_loop() ! CHECK: %[[CMINUS1:.*]] = arith.constant -1 : i32 ! CHECK: %[[CMINUS1_STEP_CVT:.*]] = fir.convert %[[CMINUS1]] : (i32) -> index ! CHECK: %[[I_LB:.*]] = fir.convert %[[C5_CVT]] : (index) -> i32 - ! CHECK: %[[I_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = + ! CHECK: %[[I_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[C5_CVT]] to %[[C1_CVT]] step %[[CMINUS1_STEP_CVT]] - ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i32) { do i=5,1,-1 ! CHECK: fir.store %[[I_IV]] to %[[I_REF]] : !fir.ref - ! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[CMINUS1_STEP_CVT]] overflow : index ! CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[CMINUS1_STEP_CVT]] : (index) -> i32 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[LI_NEXT]], %[[I_IVINC]] : index, i32 + ! CHECK: fir.result %[[I_IVINC]] : i32 ! CHECK: } end do - ! CHECK: fir.store %[[I_RES]]#1 to %[[I_REF]] : !fir.ref + ! CHECK: fir.store %[[I_RES]] to %[[I_REF]] : !fir.ref end subroutine ! 
Test a general loop with a variable step @@ -143,19 +139,18 @@ subroutine loop_with_variable_step(s,e,st) ! CHECK: %[[ST:.*]] = fir.load %[[ST_REF]] : !fir.ref ! CHECK: %[[ST_CVT:.*]] = fir.convert %[[ST]] : (i32) -> index ! CHECK: %[[I_LB:.*]] = fir.convert %[[S_CVT]] : (index) -> i32 - ! CHECK: %[[I_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = + ! CHECK: %[[I_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[S_CVT]] to %[[E_CVT]] step %[[ST_CVT]] - ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i32) { do i=s,e,st ! CHECK: fir.store %[[I_IV]] to %[[I_REF]] : !fir.ref - ! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[ST_CVT]] overflow : index ! CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[ST_CVT]] : (index) -> i32 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[LI_NEXT]], %[[I_IVINC]] : index, i32 + ! CHECK: fir.result %[[I_IVINC]] : i32 ! CHECK: } end do - ! CHECK: fir.store %[[I_RES]]#1 to %[[I_REF]] : !fir.ref + ! CHECK: fir.store %[[I_RES]] to %[[I_REF]] : !fir.ref end subroutine ! Test usage of pointer variables as index, start, end and step variables @@ -195,19 +190,18 @@ subroutine loop_with_pointer_variables(s,e,st) ! CHECK: %[[ST:.*]] = fir.load %[[ST_PTR]] : !fir.ptr ! CHECK: %[[ST_CVT:.*]] = fir.convert %[[ST]] : (i32) -> index ! CHECK: %[[I_LB:.*]] = fir.convert %[[S_CVT]] : (index) -> i32 -! CHECK: %[[I_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = +! CHECK: %[[I_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[S_CVT]] to %[[E_CVT]] step %[[ST_CVT]] -! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i32) { +! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i32) { do iptr=sptr,eptr,stptr ! CHECK: fir.store %[[I_IV]] to %[[I_PTR]] : !fir.ptr -! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[ST_CVT]] overflow : index ! 
CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[ST_CVT]] : (index) -> i32 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_PTR]] : !fir.ptr ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i32 -! CHECK: fir.result %[[LI_NEXT]], %[[I_IVINC]] : index, i32 +! CHECK: fir.result %[[I_IVINC]] : i32 end do ! CHECK: } -! CHECK: fir.store %[[I_RES]]#1 to %[[I_PTR]] : !fir.ptr +! CHECK: fir.store %[[I_RES]] to %[[I_PTR]] : !fir.ptr end subroutine ! Test usage of non-default integer kind for loop control and loop index variable @@ -225,19 +219,18 @@ subroutine loop_with_non_default_integer(s,e,st) integer(kind=8) :: s, e, st ! CHECK: %[[I_LB:.*]] = fir.convert %[[S_CVT]] : (index) -> i64 - ! CHECK: %[[I_RES:.*]]:2 = fir.do_loop %[[LI:[^ ]*]] = + ! CHECK: %[[I_RES:.*]] = fir.do_loop %[[LI:[^ ]*]] = ! CHECK-SAME: %[[S_CVT]] to %[[E_CVT]] step %[[ST_CVT]] - ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i64) { + ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i64) { do i=s,e,st ! CHECK: fir.store %[[I_IV]] to %[[I_REF]] : !fir.ref - ! CHECK: %[[LI_NEXT:.*]] = arith.addi %[[LI]], %[[ST_CVT]] overflow : index ! CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[ST_CVT]] : (index) -> i64 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i64 - ! CHECK: fir.result %[[LI_NEXT]], %[[I_IVINC]] : index, i64 + ! CHECK: fir.result %[[I_IVINC]] : i64 end do ! CHECK: } - ! CHECK: fir.store %[[I_RES]]#1 to %[[I_REF]] : !fir.ref + ! CHECK: fir.store %[[I_RES]] to %[[I_REF]] : !fir.ref end subroutine ! Test real loop control. diff --git a/flang/test/Lower/do_loop_unstructured.f90 b/flang/test/Lower/do_loop_unstructured.f90 index 176ea5ccee129..3b03850b43bb2 100644 --- a/flang/test/Lower/do_loop_unstructured.f90 +++ b/flang/test/Lower/do_loop_unstructured.f90 @@ -215,9 +215,8 @@ subroutine nested_structured_in_unstructured() ! CHECK: ^[[BODY]]: ! 
CHECK: %{{.*}} = fir.do_loop %[[J_INDEX:[^ ]*]] = ! CHECK-SAME: %{{.*}} to %{{.*}} step %[[ST:[^ ]*]] -! CHECK-SAME: iter_args(%[[J_IV:.*]] = %{{.*}}) -> (index, i32) { +! CHECK-SAME: iter_args(%[[J_IV:.*]] = %{{.*}}) -> (i32) { ! CHECK: fir.store %[[J_IV]] to %[[LOOP_VAR_J_REF]] : !fir.ref -! CHECK: %[[J_INDEX_NEXT:.*]] = arith.addi %[[J_INDEX]], %[[ST]] overflow : index ! CHECK: %[[LOOP_VAR_J:.*]] = fir.load %[[LOOP_VAR_J_REF]] : !fir.ref ! CHECK: %[[LOOP_VAR_J_NEXT:.*]] = arith.addi %[[LOOP_VAR_J]], %{{[^ ]*}} overflow : i32 ! CHECK: } diff --git a/flang/test/Lower/infinite_loop.f90 b/flang/test/Lower/infinite_loop.f90 index de0bee779c5b6..18309777e2928 100644 --- a/flang/test/Lower/infinite_loop.f90 +++ b/flang/test/Lower/infinite_loop.f90 @@ -94,17 +94,16 @@ subroutine structured_loop_in_infinite(i) ! CHECK: %[[C10_INDEX:.*]] = fir.convert %[[C10]] : (i32) -> index ! CHECK: %[[C1_1:.*]] = arith.constant 1 : index ! CHECK: %[[J_LB:.*]] = fir.convert %[[C1_INDEX]] : (index) -> i32 -! CHECK: %[[J_FINAL:.*]]:2 = fir.do_loop %[[J:[^ ]*]] = +! CHECK: %[[J_FINAL:.*]] = fir.do_loop %[[J:[^ ]*]] = ! CHECK-SAME: %[[C1_INDEX]] to %[[C10_INDEX]] step %[[C1_1]] -! CHECK-SAME: iter_args(%[[J_IV:.*]] = %[[J_LB]]) -> (index, i32) { +! CHECK-SAME: iter_args(%[[J_IV:.*]] = %[[J_LB]]) -> (i32) { ! CHECK: fir.store %[[J_IV]] to %[[J_REF]] : !fir.ref -! CHECK: %[[J_NEXT:.*]] = arith.addi %[[J]], %[[C1_1]] overflow : index ! CHECK: %[[J_STEPCAST:.*]] = fir.convert %[[C1_1]] : (index) -> i32 ! CHECK: %[[J_IVLOAD:.*]] = fir.load %[[J_REF]] : !fir.ref ! CHECK: %[[J_IVINC:.*]] = arith.addi %[[J_IVLOAD]], %[[J_STEPCAST]] overflow : i32 -! CHECK: fir.result %[[J_NEXT]], %[[J_IVINC]] : index, i32 +! CHECK: fir.result %[[J_IVINC]] : i32 ! CHECK: } -! CHECK: fir.store %[[J_FINAL]]#1 to %[[J_REF]] : !fir.ref +! CHECK: fir.store %[[J_FINAL]] to %[[J_REF]] : !fir.ref ! CHECK: cf.br ^[[BODY1]] ! CHECK: ^[[RETURN]]: ! 
CHECK: return diff --git a/flang/test/Lower/io-implied-do-fixes.f90 b/flang/test/Lower/io-implied-do-fixes.f90 index cd4fd43e05194..91e8cbc9868ff 100644 --- a/flang/test/Lower/io-implied-do-fixes.f90 +++ b/flang/test/Lower/io-implied-do-fixes.f90 @@ -10,8 +10,7 @@ ! CHECK: %[[J_VAL_FINAL:.*]] = fir.do_loop %[[J_VAL:.*]] = %{{.*}} to %{{.*}} step %{{.*}} -> index { ! CHECK: %[[J_VAL_CVT1:.*]] = fir.convert %[[J_VAL]] : (index) -> i32 ! CHECK: fir.store %[[J_VAL_CVT1]] to %[[J_ADDR]] : !fir.ptr -! CHECK: %[[J_VAL_NEXT:.*]] = arith.addi %[[J_VAL]], %{{[^ ]*}} overflow : index -! CHECK: fir.result %[[J_VAL_NEXT]] : index +! CHECK: fir.result %[[J_VAL]] : index ! CHECK: } ! CHECK: %[[J_VAL_CVT2:.*]] = fir.convert %[[J_VAL_FINAL]] : (index) -> i32 ! CHECK: fir.store %[[J_VAL_CVT2]] to %[[J_ADDR]] : !fir.ptr @@ -28,8 +27,7 @@ subroutine ido1 ! CHECK: %[[J_VAL_FINAL:.*]] = fir.do_loop %[[J_VAL:.*]] = %{{.*}} to %{{.*}} step %{{.*}} -> index { ! CHECK: %[[J_VAL_CVT1:.*]] = fir.convert %[[J_VAL]] : (index) -> i32 ! CHECK: fir.store %[[J_VAL_CVT1]] to %[[J_ADDR]] : !fir.heap -! CHECK: %[[J_VAL_NEXT:.*]] = arith.addi %[[J_VAL]], %{{[^ ]*}} overflow : index -! CHECK: fir.result %[[J_VAL_NEXT]] : index +! CHECK: fir.result %[[J_VAL]] : index ! CHECK: } ! CHECK: %[[J_VAL_CVT2:.*]] = fir.convert %[[J_VAL_FINAL]] : (index) -> i32 ! CHECK: fir.store %[[J_VAL_CVT2]] to %[[J_ADDR]] : !fir.heap diff --git a/flang/test/Lower/loops.f90 b/flang/test/Lower/loops.f90 index 64f14ff972272..2fea84b03891a 100644 --- a/flang/test/Lower/loops.f90 +++ b/flang/test/Lower/loops.f90 @@ -31,7 +31,7 @@ subroutine loop_test a(i,j,k) = a(i,j,k) + 1 enddo - ! CHECK-COUNT-3: fir.do_loop {{[^un]*}} -> (index, i32) + ! CHECK-COUNT-3: fir.do_loop {{[^un]*}} -> (i32) asum = 0 do i=1,5 do j=1,5 @@ -120,7 +120,7 @@ subroutine lis(n) ! CHECK: %[[V_95:[0-9]+]] = fir.alloca !fir.array, %{{.*}}, %{{.*}} {bindc_name = "t", pinned, uniq_name = "_QFlisEt"} ! 
CHECK: %[[V_96:[0-9]+]] = fir.alloca !fir.box>> {bindc_name = "p", pinned, uniq_name = "_QFlisEp"} ! CHECK: fir.store %{{.*}} to %[[V_96]] : !fir.ref>>> - ! CHECK: fir.do_loop %arg3 = %{{.*}} to %{{.*}} step %c1{{.*}} iter_args(%arg4 = %{{.*}}) -> (index, i32) { + ! CHECK: fir.do_loop %arg3 = %{{.*}} to %{{.*}} step %c1{{.*}} iter_args(%arg4 = %{{.*}}) -> (i32) { ! CHECK: fir.do_concurrent { ! CHECK: fir.alloca i32 {bindc_name = "m"} ! CHECK: fir.do_concurrent.loop (%{{.*}}) = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) { diff --git a/flang/test/Lower/loops2.f90 b/flang/test/Lower/loops2.f90 index 60a6bf6c53119..cdd5c986bce71 100644 --- a/flang/test/Lower/loops2.f90 +++ b/flang/test/Lower/loops2.f90 @@ -15,10 +15,10 @@ subroutine test_pointer() ! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QMtest_loop_varEi_pointer) : !fir.ref>> ! CHECK: %[[VAL_1:.*]] = fir.load %[[VAL_0]] : !fir.ref>> ! CHECK: %[[VAL_2:.*]] = fir.box_addr %[[VAL_1]] : (!fir.box>) -> !fir.ptr -! CHECK: %[[VAL_9:.*]]:2 = fir.do_loop{{.*}}iter_args(%[[IV:.*]] = {{.*}}) +! CHECK: %[[VAL_9:.*]] = fir.do_loop{{.*}}iter_args(%[[IV:.*]] = {{.*}}) ! CHECK: fir.store %[[IV]] to %[[VAL_2]] : !fir.ptr ! CHECK: } -! CHECK: fir.store %[[VAL_9]]#1 to %[[VAL_2]] : !fir.ptr +! CHECK: fir.store %[[VAL_9]] to %[[VAL_2]] : !fir.ptr end subroutine ! CHECK-LABEL: func @_QMtest_loop_varPtest_allocatable @@ -28,10 +28,10 @@ subroutine test_allocatable() ! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QMtest_loop_varEi_allocatable) : !fir.ref>> ! CHECK: %[[VAL_1:.*]] = fir.load %[[VAL_0]] : !fir.ref>> ! CHECK: %[[VAL_2:.*]] = fir.box_addr %[[VAL_1]] : (!fir.box>) -> !fir.heap -! CHECK: %[[VAL_9:.*]]:2 = fir.do_loop{{.*}}iter_args(%[[IV:.*]] = {{.*}}) +! CHECK: %[[VAL_9:.*]] = fir.do_loop{{.*}}iter_args(%[[IV:.*]] = {{.*}}) ! CHECK: fir.store %[[IV]] to %[[VAL_2]] : !fir.heap ! CHECK: } -! CHECK: fir.store %[[VAL_9]]#1 to %[[VAL_2]] : !fir.heap +! CHECK: fir.store %[[VAL_9]] to %[[VAL_2]] : !fir.heap end subroutine ! 
CHECK-LABEL: func @_QMtest_loop_varPtest_real_pointer diff --git a/flang/test/Lower/mixed_loops.f90 b/flang/test/Lower/mixed_loops.f90 index 991fd7aa82bb9..b0e1114b8dcf1 100644 --- a/flang/test/Lower/mixed_loops.f90 +++ b/flang/test/Lower/mixed_loops.f90 @@ -92,23 +92,22 @@ subroutine do_inside_while_loop ! CHECK-DAG: %[[C13:.*]] = fir.convert %[[C13_I32]] : (i32) -> index ! CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index ! CHECK: %[[I_LB:.*]] = fir.convert %[[C8]] : (index) -> i32 - ! CHECK: %[[RESULT:.*]]:2 = fir.do_loop %[[IDX:[^ ]*]] = + ! CHECK: %[[RESULT:.*]] = fir.do_loop %[[IDX:[^ ]*]] = ! CHECK-SAME: %[[C8]] to %[[C13]] step %[[C1]] - ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (index, i32) { + ! CHECK-SAME: iter_args(%[[I_IV:.*]] = %[[I_LB]]) -> (i32) { ! CHECK: fir.store %[[I_IV]] to %[[I_REF]] : !fir.ref ! CHECK-DAG: %[[J2:.*]] = fir.load %[[J_REF]] : !fir.ref ! CHECK-DAG: %[[C2:.*]] = arith.constant 2 : i32 ! CHECK: %[[JINC:.*]] = arith.muli %[[C2]], %[[J2]] : i32 ! CHECK: fir.store %[[JINC]] to %[[J_REF]] : !fir.ref - ! CHECK: %[[IINC:.*]] = arith.addi %[[IDX]], %[[C1]] overflow : index ! CHECK: %[[I_STEPCAST:.*]] = fir.convert %[[C1]] : (index) -> i32 ! CHECK: %[[I_IVLOAD:.*]] = fir.load %[[I_REF]] : !fir.ref ! CHECK: %[[I_IVINC:.*]] = arith.addi %[[I_IVLOAD]], %[[I_STEPCAST]] overflow : i32 - ! CHECK: fir.result %[[IINC]], %[[I_IVINC]] : index, i32 + ! CHECK: fir.result %[[I_IVINC]] : i32 do i=8,13 j=j*2 - ! CHECK: fir.store %[[RESULT]]#1 to %[[I_REF]] : !fir.ref + ! CHECK: fir.store %[[RESULT]] to %[[I_REF]] : !fir.ref end do ! CHECK: br ^[[HDR1]] diff --git a/flang/test/Lower/nsw.f90 b/flang/test/Lower/nsw.f90 index 2ec1efb2af42a..e113c26a9dc80 100644 --- a/flang/test/Lower/nsw.f90 +++ b/flang/test/Lower/nsw.f90 @@ -84,7 +84,7 @@ subroutine loop_params(a,lb,ub,st) ! CHECK: %[[VAL_30:.*]] = arith.muli %[[VAL_29]], %[[VAL_4]] overflow : i32 ! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_30]] : (i32) -> index ! 
CHECK: %[[VAL_32:.*]] = fir.convert %[[VAL_26]] : (index) -> i32 -! CHECK: %[[VAL_33:.*]]:2 = fir.do_loop %[[VAL_34:.*]] = %[[VAL_26]] to %[[VAL_28]] step %[[VAL_31]] iter_args(%[[VAL_35:.*]] = %[[VAL_32]]) -> (index, i32) { +! CHECK: %[[VAL_33:.*]] = fir.do_loop %[[VAL_34:.*]] = %[[VAL_26]] to %[[VAL_28]] step %[[VAL_31]] iter_args(%[[VAL_35:.*]] = %[[VAL_32]]) -> (i32) { subroutine loop_params2(a,lb,ub,st) integer :: i, lb, ub, st diff --git a/flang/test/Parser/OpenMP/assumption.f90 b/flang/test/Parser/OpenMP/assumption.f90 index 0f333f99f9085..86cbad9e42f78 100644 --- a/flang/test/Parser/OpenMP/assumption.f90 +++ b/flang/test/Parser/OpenMP/assumption.f90 @@ -141,9 +141,11 @@ program p end program p !UNPARSE: PROGRAM p -!UNPARSE: !$OMP ASSUMES NO_OPENMP +!UNPARSE: !$OMP ASSUMES NO_OPENMP !UNPARSE: END PROGRAM p -!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclarativeAssumes -!PARSE-TREE: | Verbatim +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclarativeAssumes -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = assumes !PARSE-TREE: | OmpClauseList -> OmpClause -> NoOpenmp +!PARSE-TREE: | Flags = None +!PARSE-TREE: ImplicitPart -> diff --git a/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 b/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 index 916bd66017ce0..16dc4eb44e6fd 100644 --- a/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 +++ b/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 @@ -1,5 +1,3 @@ -! REQUIRES: openmp_runtime - ! RUN: %flang_fc1 %openmp_flags -fopenmp-version=52 -fdebug-dump-parse-tree %s | FileCheck %s ! RUN: %flang_fc1 %openmp_flags -fdebug-unparse -fopenmp-version=52 %s | FileCheck %s --check-prefix="UNPARSE" @@ -15,11 +13,14 @@ function func() result(i) contains function func1() result(i) !$omp declare target enter(func1) indirect(.true.) 
- !CHECK: | | | | | OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> Enter -> OmpEnterClause - !CHECK-NEXT: | | | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func1' - !CHECK-NEXT: | | | | | OmpClause -> Indirect -> OmpIndirectClause -> Scalar -> Logical -> Expr = '.true._4' - !CHECK-NEXT: | | | | | | LiteralConstant -> LogicalLiteralConstant - !CHECK-NEXT: | | | | | | | bool = 'true' + !CHECK: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification + !CHECK-NEXT: | OmpDirectiveName -> llvm::omp::Directive = declare target + !CHECK-NEXT: | OmpClauseList -> OmpClause -> Enter -> OmpEnterClause + !CHECK-NEXT: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func1' + !CHECK-NEXT: | OmpClause -> Indirect -> OmpIndirectClause -> Scalar -> Logical -> Expr = '.true._4' + !CHECK-NEXT: | | LiteralConstant -> LogicalLiteralConstant + !CHECK-NEXT: | | | bool = 'true' + !CHECK-NEXT: | Flags = None character(1) :: i i = 'a' return @@ -27,9 +28,12 @@ function func1() result(i) function func2() result(i) !$omp declare target enter(func2) indirect - !CHECK: | | | | | OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> Enter -> OmpEnterClause - !CHECK-NEXT: | | | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func2' - !CHECK-NEXT: | | | | | OmpClause -> Indirect -> OmpIndirectClause -> + !CHECK: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification + !CHECK-NEXT: | OmpDirectiveName -> llvm::omp::Directive = declare target + !CHECK-NEXT: | OmpClauseList -> OmpClause -> Enter -> OmpEnterClause + !CHECK-NEXT: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func2' + !CHECK-NEXT: | OmpClause -> Indirect -> OmpIndirectClause -> + !CHECK-NEXT: | Flags = None character(1) :: i i = 'b' return @@ -51,5 +55,5 @@ program main end program -!UNPARSE: !$OMP DECLARE TARGET 
ENTER(func1) INDIRECT(.true._4) -!UNPARSE: !$OMP DECLARE TARGET ENTER(func2) INDIRECT() +!UNPARSE: !$OMP DECLARE TARGET ENTER(func1) INDIRECT(.true._4) +!UNPARSE: !$OMP DECLARE TARGET ENTER(func2) INDIRECT() diff --git a/flang/test/Parser/OpenMP/declare-target-to-clause.f90 b/flang/test/Parser/OpenMP/declare-target-to-clause.f90 index bcb23f821e403..8198f44bcec18 100644 --- a/flang/test/Parser/OpenMP/declare-target-to-clause.f90 +++ b/flang/test/Parser/OpenMP/declare-target-to-clause.f90 @@ -9,11 +9,13 @@ module m !UNPARSE: MODULE m !UNPARSE: INTEGER x, y -!UNPARSE: !$OMP DECLARE TARGET TO(x,y) +!UNPARSE: !$OMP DECLARE TARGET TO(x,y) !UNPARSE: END MODULE -!PARSE-TREE: OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> To -> OmpToClause -!PARSE-TREE: | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' -!PARSE-TREE: | OmpObject -> Designator -> DataRef -> Name = 'y' -!PARSE-TREE: | bool = 'true' - +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | OmpObject -> Designator -> DataRef -> Name = 'y' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None diff --git a/flang/test/Parser/OpenMP/declare_target-device_type.f90 b/flang/test/Parser/OpenMP/declare_target-device_type.f90 index b6903614a628e..7df796288f4d4 100644 --- a/flang/test/Parser/OpenMP/declare_target-device_type.f90 +++ b/flang/test/Parser/OpenMP/declare_target-device_type.f90 @@ -3,35 +3,113 @@ subroutine openmp_declare_target integer, save :: x, y - !CHECK: !$omp declare target device_type(host) enter(x) +!CHECK: !$omp declare target device_type(host) enter(x) + +!PARSE-TREE: DeclarationConstruct -> 
SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Host +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | Flags = None !$omp declare target device_type(host) enter(x) - !CHECK: !$omp declare target device_type(nohost) enter(x) + +!CHECK: !$omp declare target device_type(nohost) enter(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Nohost +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | Flags = None !$omp declare target device_type(nohost) enter(x) - !CHECK: !$omp declare target device_type(any) enter(x) + +!CHECK: !$omp declare target device_type(any) enter(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Any +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | Flags = None !$omp declare target device_type(any) enter(x) - !CHECK: !$omp declare target device_type(host) to(x) +!CHECK: !$omp declare target device_type(host) to(x) + +!PARSE-TREE: 
DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Host +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(host) to(x) - !CHECK: !$omp declare target device_type(nohost) to(x) + +!CHECK: !$omp declare target device_type(nohost) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Nohost +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(nohost) to(x) - !CHECK: !$omp declare target device_type(any) to(x) + +!CHECK: !$omp declare target device_type(any) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Any +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(any) to(x) - !CHECK: !$omp declare target device_type(host) enter(y) 
to(x) +!CHECK: !$omp declare target device_type(host) enter(y) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Host +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'y' +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(host) enter(y) to(x) - !CHECK: !$omp declare target device_type(nohost) enter(y) to(x) + +!CHECK: !$omp declare target device_type(nohost) enter(y) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Nohost +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'y' +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(nohost) enter(y) to(x) - !CHECK: !$omp declare target device_type(any) enter(y) to(x) + +!CHECK: !$omp declare target device_type(any) enter(y) to(x) + +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare 
target +!PARSE-TREE: | OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Any +!PARSE-TREE: | OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'y' +!PARSE-TREE: | OmpClause -> To -> OmpToClause +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | | bool = 'true' +!PARSE-TREE: | Flags = None !$omp declare target device_type(any) enter(y) to(x) + integer :: a(1024), i - !CHECK: do +!CHECK: do do i = 1, 1024 a(i) = i - !CHECK: end do +!CHECK: end do end do -!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -!PARSE-TREE: OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Host -!PARSE-TREE: OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Nohost -!PARSE-TREE: OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> DeviceType -> OmpDeviceTypeClause -> DeviceTypeDescription = Any END subroutine openmp_declare_target diff --git a/flang/test/Parser/OpenMP/enter-automap-modifier.f90 b/flang/test/Parser/OpenMP/enter-automap-modifier.f90 index 1f361ca5c2f06..bc5b5eb3e7ef3 100644 --- a/flang/test/Parser/OpenMP/enter-automap-modifier.f90 +++ b/flang/test/Parser/OpenMP/enter-automap-modifier.f90 @@ -8,9 +8,12 @@ program automap !UNPARSE: PROGRAM AUTOMAP !UNPARSE: INTEGER x -!UNPARSE: !$OMP DECLARE TARGET ENTER(AUTOMAP: x) +!UNPARSE: !$OMP DECLARE_TARGET ENTER(AUTOMAP: x) !UNPARSE: END PROGRAM -!PARSE-TREE: OmpClauseList -> OmpClause -> Enter -> OmpEnterClause -!PARSE-TREE-NEXT: | Modifier -> OmpAutomapModifier -> Value = Automap -!PARSE-TREE-NEXT: | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: DeclarationConstruct -> SpecificationConstruct -> 
OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> OmpClause -> Enter -> OmpEnterClause +!PARSE-TREE: | | Modifier -> OmpAutomapModifier -> Value = Automap +!PARSE-TREE: | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x' +!PARSE-TREE: | Flags = None diff --git a/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 b/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 index b72c5a2c1c086..f4cdd556bd4e5 100644 --- a/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 +++ b/flang/test/Parser/OpenMP/openmp6-directive-spellings.f90 @@ -124,12 +124,13 @@ subroutine f04 end !UNPARSE: SUBROUTINE f04 -!UNPARSE: !$OMP DECLARE TARGET +!UNPARSE: !$OMP DECLARE_TARGET !UNPARSE: END SUBROUTINE -!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -!PARSE-TREE: | Verbatim -!PARSE-TREE: | OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPDeclareTargetConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = declare target +!PARSE-TREE: | OmpClauseList -> +!PARSE-TREE: | Flags = None subroutine f05 implicit none diff --git a/flang/test/Parser/OpenMP/requires.f90 b/flang/test/Parser/OpenMP/requires.f90 new file mode 100644 index 0000000000000..6cbb06eaf93c0 --- /dev/null +++ b/flang/test/Parser/OpenMP/requires.f90 @@ -0,0 +1,33 @@ +!RUN: %flang_fc1 -fdebug-unparse -fopenmp -fopenmp-version=50 %s | FileCheck --ignore-case --check-prefix="UNPARSE" %s +!RUN: %flang_fc1 -fdebug-dump-parse-tree -fopenmp -fopenmp-version=50 %s | FileCheck --check-prefix="PARSE-TREE" %s + +!$omp requires atomic_default_mem_order(seq_cst) + +!UNPARSE: !$OMP REQUIRES ATOMIC_DEFAULT_MEM_ORDER(SEQ_CST) + +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPRequiresConstruct -> OmpDirectiveSpecification 
+!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = requires +!PARSE-TREE: | OmpClauseList -> OmpClause -> AtomicDefaultMemOrder -> OmpAtomicDefaultMemOrderClause -> OmpMemoryOrderType = Seq_Cst +!PARSE-TREE: | Flags = None + +!$omp requires unified_shared_memory unified_address + +!UNPARSE: !$OMP REQUIRES UNIFIED_SHARED_MEMORY UNIFIED_ADDRESS + +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPRequiresConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = requires +!PARSE-TREE: | OmpClauseList -> OmpClause -> UnifiedSharedMemory +!PARSE-TREE: | OmpClause -> UnifiedAddress +!PARSE-TREE: | Flags = None + +!$omp requires dynamic_allocators reverse_offload + +!UNPARSE: !$OMP REQUIRES DYNAMIC_ALLOCATORS REVERSE_OFFLOAD + +!PARSE-TREE: OpenMPDeclarativeConstruct -> OpenMPRequiresConstruct -> OmpDirectiveSpecification +!PARSE-TREE: | OmpDirectiveName -> llvm::omp::Directive = requires +!PARSE-TREE: | OmpClauseList -> OmpClause -> DynamicAllocators +!PARSE-TREE: | OmpClause -> ReverseOffload +!PARSE-TREE: | Flags = None + +end diff --git a/flang/test/Semantics/OpenMP/blank-common-block.f90 b/flang/test/Semantics/OpenMP/blank-common-block.f90 index 4a217fced0ff7..e410f0c56d1c0 100644 --- a/flang/test/Semantics/OpenMP/blank-common-block.f90 +++ b/flang/test/Semantics/OpenMP/blank-common-block.f90 @@ -4,6 +4,7 @@ module m integer :: a common // a !ERROR: Blank common blocks are not allowed as directive or clause arguments + !ERROR: An argument to the DECLARE TARGET directive should be an extended-list-item !$omp declare_target(//) !ERROR: Blank common blocks are not allowed as directive or clause arguments !$omp threadprivate(//) diff --git a/flang/test/Semantics/OpenMP/declare-target-function-name-with-symbols.f90 b/flang/test/Semantics/OpenMP/declare-target-function-name-with-symbols.f90 index 9a0acdb3dd100..3439e6fd13981 100644 --- a/flang/test/Semantics/OpenMP/declare-target-function-name-with-symbols.f90 +++ 
b/flang/test/Semantics/OpenMP/declare-target-function-name-with-symbols.f90 @@ -19,7 +19,7 @@ end module test !CHECK: !DEF: /test/ex/b ObjectEntity INTEGER(4) !CHECK: !DEF: /test/ex/c ObjectEntity INTEGER(4) !CHECK: function ex(a, b, c) -!CHECK: !$omp declare target (ex) +!CHECK: !$omp declare target(ex) !CHECK: !REF: /test/ex/a !CHECK: !REF: /test/ex/b !CHECK: !REF: /test/ex/c diff --git a/flang/test/Transforms/OpenMP/simd-only.mlir b/flang/test/Transforms/OpenMP/simd-only.mlir index 0025d10fbd21a..a550d5660d224 100644 --- a/flang/test/Transforms/OpenMP/simd-only.mlir +++ b/flang/test/Transforms/OpenMP/simd-only.mlir @@ -65,10 +65,10 @@ func.func @parallel(%arg0: i32, %arg1: !fir.ref) { // CHECK: fir.convert %16 = fir.convert %c100000_i32 : (i32) -> index // CHECK: fir.do_loop - %18:2 = fir.do_loop %arg4 = %15 to %16 step %c1 iter_args(%arg2 = %arg0) -> (index, i32) { + %18 = fir.do_loop %arg4 = %15 to %16 step %c1 iter_args(%arg2 = %arg0) -> (i32) { // CHECK: fir.store fir.store %arg0 to %arg1 : !fir.ref - fir.result %arg4, %arg2 : index, i32 + fir.result %arg2 : i32 } // CHECK-NOT: omp.terminator omp.terminator diff --git a/libc/shared/math.h b/libc/shared/math.h index 9ba898ea6dac9..cccd6a375930e 100644 --- a/libc/shared/math.h +++ b/libc/shared/math.h @@ -45,6 +45,7 @@ #include "math/exp10.h" #include "math/exp10f.h" #include "math/exp10f16.h" +#include "math/exp10m1f.h" #include "math/expf.h" #include "math/expf16.h" #include "math/frexpf.h" diff --git a/libc/shared/math/exp10m1f.h b/libc/shared/math/exp10m1f.h new file mode 100644 index 0000000000000..9093705ce801b --- /dev/null +++ b/libc/shared/math/exp10m1f.h @@ -0,0 +1,23 @@ +//===-- Shared exp10m1f function --------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SHARED_MATH_EXP10M1F_H +#define LLVM_LIBC_SHARED_MATH_EXP10M1F_H + +#include "shared/libc_common.h" +#include "src/__support/math/exp10m1f.h" + +namespace LIBC_NAMESPACE_DECL { +namespace shared { + +using math::exp10m1f; + +} // namespace shared +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SHARED_MATH_EXP10M1F_H diff --git a/libc/src/__support/CPP/simd.h b/libc/src/__support/CPP/simd.h index d2a5b17fa4b9f..422d2f4c8433d 100644 --- a/libc/src/__support/CPP/simd.h +++ b/libc/src/__support/CPP/simd.h @@ -287,34 +287,72 @@ LIBC_INLINE constexpr static T hmax(simd v) { } // Accessor helpers. -template = 0> -LIBC_INLINE T load_unaligned(const void *ptr) { +template +LIBC_INLINE T constexpr static load(const void *ptr, bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); T tmp; - __builtin_memcpy(&tmp, ptr, sizeof(T)); + __builtin_memcpy_inline( + &tmp, reinterpret_cast *>(ptr), sizeof(T)); return tmp; } template = 0> -LIBC_INLINE T load_aligned(const void *ptr) { - return load_unaligned(__builtin_assume_aligned(ptr, alignof(T))); +LIBC_INLINE constexpr static void store(T v, void *ptr, bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); + __builtin_memcpy_inline(ptr, &v, sizeof(T)); } template = 0> -LIBC_INLINE T store_unaligned(T v, void *ptr) { - __builtin_memcpy(ptr, &v, sizeof(T)); +LIBC_INLINE constexpr static T +load_masked(simd> mask, const void *ptr, + T passthru = internal::poison(), bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); + return __builtin_masked_load( + mask, reinterpret_cast *>(ptr), passthru); } template = 0> -LIBC_INLINE T store_aligned(T v, void *ptr) { - store_unaligned(v, __builtin_assume_aligned(ptr, alignof(T))); +LIBC_INLINE constexpr static void 
store_masked(simd> mask, + T v, void *ptr, + bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); + __builtin_masked_store(mask, v, + reinterpret_cast *>(ptr)); +} +template = 0> +LIBC_INLINE constexpr static T gather(simd> mask, Idx idx, + const void *base, bool aligned = false) { + if (aligned) + base = __builtin_assume_aligned(base, alignof(T)); + return __builtin_masked_gather( + mask, idx, reinterpret_cast *>(base)); +} +template = 0> +LIBC_INLINE constexpr static void scatter(simd> mask, + Idx idx, T v, void *base, + bool aligned = false) { + if (aligned) + base = __builtin_assume_aligned(base, alignof(T)); + __builtin_masked_scatter(mask, idx, v, + reinterpret_cast *>(base)); } template = 0> -LIBC_INLINE T -masked_load(simd> m, void *ptr, - T passthru = internal::poison>()) { - return __builtin_masked_load(m, ptr, passthru); +LIBC_INLINE constexpr static T +expand(simd> mask, const void *ptr, + T passthru = internal::poison(), bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); + return __builtin_masked_expand_load( + mask, reinterpret_cast *>(ptr), passthru); } template = 0> -LIBC_INLINE T masked_store(simd> m, T v, void *ptr) { - __builtin_masked_store( - m, v, static_cast(__builtin_assume_aligned(ptr, alignof(T)))); +LIBC_INLINE constexpr static void compress(simd> mask, T v, + void *ptr, bool aligned = false) { + if (aligned) + ptr = __builtin_assume_aligned(ptr, alignof(T)); + __builtin_masked_compress_store( + mask, v, reinterpret_cast *>(ptr)); } // Construction helpers. 
diff --git a/libc/src/__support/math/CMakeLists.txt b/libc/src/__support/math/CMakeLists.txt index 12ffa2ab456e7..84c1b15498672 100644 --- a/libc/src/__support/math/CMakeLists.txt +++ b/libc/src/__support/math/CMakeLists.txt @@ -481,6 +481,23 @@ add_header_library( libc.src.__support.FPUtil.generic.sqrt ) +add_header_library( + exp10m1f + HDRS + exp10m1f.h + DEPENDS + .exp10f_utils + libc.src.errno.errno + libc.src.__support.common + libc.src.__support.FPUtil.except_value_utils + libc.src.__support.FPUtil.fenv_impl + libc.src.__support.FPUtil.fp_bits + libc.src.__support.FPUtil.multiply_add + libc.src.__support.FPUtil.polyeval + libc.src.__support.FPUtil.rounding_mode + libc.src.__support.macros.optimization +) + add_header_library( erff HDRS diff --git a/libc/src/__support/math/exp10m1f.h b/libc/src/__support/math/exp10m1f.h new file mode 100644 index 0000000000000..9fe4ff774ec68 --- /dev/null +++ b/libc/src/__support/math/exp10m1f.h @@ -0,0 +1,234 @@ +//===-- Implementation header for exp10m1f ----------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F_H +#define LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F_H + +#include "exp10f_utils.h" +#include "src/__support/FPUtil/FEnvImpl.h" +#include "src/__support/FPUtil/FPBits.h" +#include "src/__support/FPUtil/PolyEval.h" +#include "src/__support/FPUtil/except_value_utils.h" +#include "src/__support/FPUtil/multiply_add.h" +#include "src/__support/FPUtil/rounding_mode.h" +#include "src/__support/common.h" +#include "src/__support/libc_errno.h" +#include "src/__support/macros/config.h" +#include "src/__support/macros/optimization.h" + +namespace LIBC_NAMESPACE_DECL { + +namespace math { + +namespace exp10m1f_internal { + +#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS +static constexpr size_t N_EXCEPTS_LO = 11; + +static constexpr fputil::ExceptValues EXP10M1F_EXCEPTS_LO = + {{ + // x = 0x1.0fe54ep-11, exp10m1f(x) = 0x1.3937eep-10 (RZ) + {0x3a07'f2a7U, 0x3a9c'9bf7U, 1U, 0U, 1U}, + // x = 0x1.80e6eap-11, exp10m1f(x) = 0x1.bb8272p-10 (RZ) + {0x3a40'7375U, 0x3add'c139U, 1U, 0U, 1U}, + // x = -0x1.2a33bcp-51, exp10m1f(x) = -0x1.57515ep-50 (RZ) + {0xa615'19deU, 0xa6ab'a8afU, 0U, 1U, 0U}, + // x = -0x0p+0, exp10m1f(x) = -0x0p+0 (RZ) + {0x8000'0000U, 0x8000'0000U, 0U, 0U, 0U}, + // x = -0x1.b59e08p-31, exp10m1f(x) = -0x1.f7d356p-30 (RZ) + {0xb05a'cf04U, 0xb0fb'e9abU, 0U, 1U, 1U}, + // x = -0x1.bf342p-12, exp10m1f(x) = -0x1.014e02p-10 (RZ) + {0xb9df'9a10U, 0xba80'a701U, 0U, 1U, 0U}, + // x = -0x1.6207fp-11, exp10m1f(x) = -0x1.9746cap-10 (RZ) + {0xba31'03f8U, 0xbacb'a365U, 0U, 1U, 1U}, + // x = -0x1.bd0c66p-11, exp10m1f(x) = -0x1.ffe168p-10 (RZ) + {0xba5e'8633U, 0xbaff'f0b4U, 0U, 1U, 1U}, + // x = -0x1.ffd84cp-10, exp10m1f(x) = -0x1.25faf2p-8 (RZ) + {0xbaff'ec26U, 0xbb92'fd79U, 0U, 1U, 0U}, + // x = -0x1.a74172p-9, exp10m1f(x) = -0x1.e57be2p-8 (RZ) + {0xbb53'a0b9U, 0xbbf2'bdf1U, 0U, 1U, 1U}, + // x = 
-0x1.cb694cp-9, exp10m1f(x) = -0x1.0764e4p-7 (RZ) + {0xbb65'b4a6U, 0xbc03'b272U, 0U, 1U, 0U}, + }}; + +static constexpr size_t N_EXCEPTS_HI = 19; + +static constexpr fputil::ExceptValues EXP10M1F_EXCEPTS_HI = + {{ + // (input, RZ output, RU offset, RD offset, RN offset) + // x = 0x1.8d31eep-8, exp10m1f(x) = 0x1.cc7e4cp-7 (RZ) + {0x3bc6'98f7U, 0x3c66'3f26U, 1U, 0U, 1U}, + // x = 0x1.915fcep-8, exp10m1f(x) = 0x1.d15f72p-7 (RZ) + {0x3bc8'afe7U, 0x3c68'afb9U, 1U, 0U, 0U}, + // x = 0x1.bcf982p-8, exp10m1f(x) = 0x1.022928p-6 (RZ) + {0x3bde'7cc1U, 0x3c81'1494U, 1U, 0U, 1U}, + // x = 0x1.99ff0ap-7, exp10m1f(x) = 0x1.dee416p-6 (RZ) + {0x3c4c'ff85U, 0x3cef'720bU, 1U, 0U, 0U}, + // x = 0x1.75ea14p-6, exp10m1f(x) = 0x1.b9ff16p-5 (RZ) + {0x3cba'f50aU, 0x3d5c'ff8bU, 1U, 0U, 0U}, + // x = 0x1.f81b64p-6, exp10m1f(x) = 0x1.2cb6bcp-4 (RZ) + {0x3cfc'0db2U, 0x3d96'5b5eU, 1U, 0U, 0U}, + // x = 0x1.fafecp+3, exp10m1f(x) = 0x1.8c880ap+52 (RZ) + {0x417d'7f60U, 0x59c6'4405U, 1U, 0U, 0U}, + // x = -0x1.3bf094p-8, exp10m1f(x) = -0x1.69ba4ap-7 (RZ) + {0xbb9d'f84aU, 0xbc34'dd25U, 0U, 1U, 0U}, + // x = -0x1.4558bcp-8, exp10m1f(x) = -0x1.746fb8p-7 (RZ) + {0xbba2'ac5eU, 0xbc3a'37dcU, 0U, 1U, 1U}, + // x = -0x1.4bb43p-8, exp10m1f(x) = -0x1.7babe4p-7 (RZ) + {0xbba5'da18U, 0xbc3d'd5f2U, 0U, 1U, 1U}, + // x = -0x1.776cc8p-8, exp10m1f(x) = -0x1.ad62c4p-7 (RZ) + {0xbbbb'b664U, 0xbc56'b162U, 0U, 1U, 0U}, + // x = -0x1.f024cp-8, exp10m1f(x) = -0x1.1b20d6p-6 (RZ) + {0xbbf8'1260U, 0xbc8d'906bU, 0U, 1U, 1U}, + // x = -0x1.f510eep-8, exp10m1f(x) = -0x1.1de9aap-6 (RZ) + {0xbbfa'8877U, 0xbc8e'f4d5U, 0U, 1U, 0U}, + // x = -0x1.0b43c4p-7, exp10m1f(x) = -0x1.30d418p-6 (RZ) + {0xbc05'a1e2U, 0xbc98'6a0cU, 0U, 1U, 0U}, + // x = -0x1.245ee4p-7, exp10m1f(x) = -0x1.4d2b86p-6 (RZ) + {0xbc12'2f72U, 0xbca6'95c3U, 0U, 1U, 0U}, + // x = -0x1.f9f2dap-7, exp10m1f(x) = -0x1.1e2186p-5 (RZ) + {0xbc7c'f96dU, 0xbd0f'10c3U, 0U, 1U, 0U}, + // x = -0x1.08e42p-6, exp10m1f(x) = -0x1.2b5c4p-5 (RZ) + {0xbc84'7210U, 0xbd15'ae20U, 0U, 1U, 
1U}, + // x = -0x1.0cdc44p-5, exp10m1f(x) = -0x1.2a2152p-4 (RZ) + {0xbd06'6e22U, 0xbd95'10a9U, 0U, 1U, 1U}, + // x = -0x1.ca4322p-5, exp10m1f(x) = -0x1.ef073p-4 (RZ) + {0xbd65'2191U, 0xbdf7'8398U, 0U, 1U, 1U}, + }}; +#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS + +} // namespace exp10m1f_internal + +LIBC_INLINE static constexpr float exp10m1f(float x) { + using namespace exp10m1f_internal; + using FPBits = fputil::FPBits; + FPBits xbits(x); + + uint32_t x_u = xbits.uintval(); + uint32_t x_abs = x_u & 0x7fff'ffffU; + + // When x >= log10(2^128), or x is nan + if (LIBC_UNLIKELY(xbits.is_pos() && x_u >= 0x421a'209bU)) { + if (xbits.is_finite()) { + int rounding = fputil::quick_get_round(); + if (rounding == FE_DOWNWARD || rounding == FE_TOWARDZERO) + return FPBits::max_normal().get_val(); + + fputil::set_errno_if_required(ERANGE); + fputil::raise_except_if_required(FE_OVERFLOW); + } + + // x >= log10(2^128) and 10^x - 1 rounds to +inf, or x is +inf or nan + return x + FPBits::inf().get_val(); + } + + // When |x| <= log10(2) * 2^(-6) + if (LIBC_UNLIKELY(x_abs <= 0x3b9a'209bU)) { +#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS + if (auto r = EXP10M1F_EXCEPTS_LO.lookup(x_u); LIBC_UNLIKELY(r.has_value())) + return r.value(); +#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS + + double dx = x; + double dx_sq = dx * dx; + double c0 = dx * Exp10Base::COEFFS[0]; + double c1 = + fputil::multiply_add(dx, Exp10Base::COEFFS[2], Exp10Base::COEFFS[1]); + double c2 = + fputil::multiply_add(dx, Exp10Base::COEFFS[4], Exp10Base::COEFFS[3]); + // 10^dx - 1 ~ (1 + COEFFS[0] * dx + ... + COEFFS[4] * dx^5) - 1 + // = COEFFS[0] * dx + ... 
+ COEFFS[4] * dx^5 + return static_cast(fputil::polyeval(dx_sq, c0, c1, c2)); + } + + // When x <= log10(2^-25), or x is nan + if (LIBC_UNLIKELY(x_u >= 0xc0f0d2f1)) { + // exp10m1(-inf) = -1 + if (xbits.is_inf()) + return -1.0f; + // exp10m1(nan) = nan + if (xbits.is_nan()) + return x; + + int rounding = fputil::quick_get_round(); + if (rounding == FE_UPWARD || rounding == FE_TOWARDZERO || + (rounding == FE_TONEAREST && x_u == 0xc0f0d2f1)) + return -0x1.ffff'fep-1f; // -1.0f + 0x1.0p-24f + + fputil::set_errno_if_required(ERANGE); + fputil::raise_except_if_required(FE_UNDERFLOW); + return -1.0f; + } + + // Exact outputs when x = 1, 2, ..., 10. + // Quick check mask: 0x800f'ffffU = ~(bits of 1.0f | ... | bits of 10.0f) + if (LIBC_UNLIKELY((x_u & 0x800f'ffffU) == 0)) { + switch (x_u) { + case 0x3f800000U: // x = 1.0f + return 9.0f; + case 0x40000000U: // x = 2.0f + return 99.0f; + case 0x40400000U: // x = 3.0f + return 999.0f; + case 0x40800000U: // x = 4.0f + return 9'999.0f; + case 0x40a00000U: // x = 5.0f + return 99'999.0f; + case 0x40c00000U: // x = 6.0f + return 999'999.0f; + case 0x40e00000U: // x = 7.0f + return 9'999'999.0f; + case 0x41000000U: { // x = 8.0f + int rounding = fputil::quick_get_round(); + if (rounding == FE_UPWARD || rounding == FE_TONEAREST) + return 100'000'000.0f; + return 99'999'992.0f; + } + case 0x41100000U: { // x = 9.0f + int rounding = fputil::quick_get_round(); + if (rounding == FE_UPWARD || rounding == FE_TONEAREST) + return 1'000'000'000.0f; + return 999'999'936.0f; + } + case 0x41200000U: { // x = 10.0f + int rounding = fputil::quick_get_round(); + if (rounding == FE_UPWARD || rounding == FE_TONEAREST) + return 10'000'000'000.0f; + return 9'999'998'976.0f; + } + } + } + +#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS + if (auto r = EXP10M1F_EXCEPTS_HI.lookup(x_u); LIBC_UNLIKELY(r.has_value())) + return r.value(); +#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS + + // Range reduction: 10^x = 2^(mid + hi) * 10^lo + // rr = (2^(mid + hi), lo) 
+ auto rr = exp_b_range_reduc(x); + + // The low part is approximated by a degree-5 minimax polynomial. + // 10^lo ~ 1 + COEFFS[0] * lo + ... + COEFFS[4] * lo^5 + double lo_sq = rr.lo * rr.lo; + double c0 = fputil::multiply_add(rr.lo, Exp10Base::COEFFS[0], 1.0); + double c1 = + fputil::multiply_add(rr.lo, Exp10Base::COEFFS[2], Exp10Base::COEFFS[1]); + double c2 = + fputil::multiply_add(rr.lo, Exp10Base::COEFFS[4], Exp10Base::COEFFS[3]); + double exp10_lo = fputil::polyeval(lo_sq, c0, c1, c2); + // 10^x - 1 = 2^(mid + hi) * 10^lo - 1 + // ~ mh * exp10_lo - 1 + return static_cast(fputil::multiply_add(exp10_lo, rr.mh, -1.0)); +} + +} // namespace math + +} // namespace LIBC_NAMESPACE_DECL + +#endif // LLVM_LIBC_SRC___SUPPORT_MATH_EXP10M1F_H diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt index 7cd34fab53480..8074a3925626c 100644 --- a/libc/src/math/generic/CMakeLists.txt +++ b/libc/src/math/generic/CMakeLists.txt @@ -1593,16 +1593,7 @@ add_entrypoint_object( HDRS ../exp10m1f.h DEPENDS - libc.src.errno.errno - libc.src.__support.common - libc.src.__support.FPUtil.except_value_utils - libc.src.__support.FPUtil.fenv_impl - libc.src.__support.FPUtil.fp_bits - libc.src.__support.FPUtil.multiply_add - libc.src.__support.FPUtil.polyeval - libc.src.__support.FPUtil.rounding_mode - libc.src.__support.macros.optimization - libc.src.__support.math.exp10f_utils + libc.src.__support.math.exp10m1f ) add_entrypoint_object( diff --git a/libc/src/math/generic/exp10m1f.cpp b/libc/src/math/generic/exp10m1f.cpp index 8589e3fb6639d..87980b7753b40 100644 --- a/libc/src/math/generic/exp10m1f.cpp +++ b/libc/src/math/generic/exp10m1f.cpp @@ -7,215 +7,10 @@ //===----------------------------------------------------------------------===// #include "src/math/exp10m1f.h" -#include "src/__support/FPUtil/FEnvImpl.h" -#include "src/__support/FPUtil/FPBits.h" -#include "src/__support/FPUtil/PolyEval.h" -#include "src/__support/FPUtil/except_value_utils.h" 
-#include "src/__support/FPUtil/multiply_add.h" -#include "src/__support/FPUtil/rounding_mode.h" -#include "src/__support/common.h" -#include "src/__support/libc_errno.h" -#include "src/__support/macros/config.h" -#include "src/__support/macros/optimization.h" -#include "src/__support/math/exp10f_utils.h" +#include "src/__support/math/exp10m1f.h" namespace LIBC_NAMESPACE_DECL { -#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS -static constexpr size_t N_EXCEPTS_LO = 11; - -static constexpr fputil::ExceptValues EXP10M1F_EXCEPTS_LO = - {{ - // x = 0x1.0fe54ep-11, exp10m1f(x) = 0x1.3937eep-10 (RZ) - {0x3a07'f2a7U, 0x3a9c'9bf7U, 1U, 0U, 1U}, - // x = 0x1.80e6eap-11, exp10m1f(x) = 0x1.bb8272p-10 (RZ) - {0x3a40'7375U, 0x3add'c139U, 1U, 0U, 1U}, - // x = -0x1.2a33bcp-51, exp10m1f(x) = -0x1.57515ep-50 (RZ) - {0xa615'19deU, 0xa6ab'a8afU, 0U, 1U, 0U}, - // x = -0x0p+0, exp10m1f(x) = -0x0p+0 (RZ) - {0x8000'0000U, 0x8000'0000U, 0U, 0U, 0U}, - // x = -0x1.b59e08p-31, exp10m1f(x) = -0x1.f7d356p-30 (RZ) - {0xb05a'cf04U, 0xb0fb'e9abU, 0U, 1U, 1U}, - // x = -0x1.bf342p-12, exp10m1f(x) = -0x1.014e02p-10 (RZ) - {0xb9df'9a10U, 0xba80'a701U, 0U, 1U, 0U}, - // x = -0x1.6207fp-11, exp10m1f(x) = -0x1.9746cap-10 (RZ) - {0xba31'03f8U, 0xbacb'a365U, 0U, 1U, 1U}, - // x = -0x1.bd0c66p-11, exp10m1f(x) = -0x1.ffe168p-10 (RZ) - {0xba5e'8633U, 0xbaff'f0b4U, 0U, 1U, 1U}, - // x = -0x1.ffd84cp-10, exp10m1f(x) = -0x1.25faf2p-8 (RZ) - {0xbaff'ec26U, 0xbb92'fd79U, 0U, 1U, 0U}, - // x = -0x1.a74172p-9, exp10m1f(x) = -0x1.e57be2p-8 (RZ) - {0xbb53'a0b9U, 0xbbf2'bdf1U, 0U, 1U, 1U}, - // x = -0x1.cb694cp-9, exp10m1f(x) = -0x1.0764e4p-7 (RZ) - {0xbb65'b4a6U, 0xbc03'b272U, 0U, 1U, 0U}, - }}; - -static constexpr size_t N_EXCEPTS_HI = 19; - -static constexpr fputil::ExceptValues EXP10M1F_EXCEPTS_HI = - {{ - // (input, RZ output, RU offset, RD offset, RN offset) - // x = 0x1.8d31eep-8, exp10m1f(x) = 0x1.cc7e4cp-7 (RZ) - {0x3bc6'98f7U, 0x3c66'3f26U, 1U, 0U, 1U}, - // x = 0x1.915fcep-8, exp10m1f(x) = 0x1.d15f72p-7 (RZ) - 
{0x3bc8'afe7U, 0x3c68'afb9U, 1U, 0U, 0U}, - // x = 0x1.bcf982p-8, exp10m1f(x) = 0x1.022928p-6 (RZ) - {0x3bde'7cc1U, 0x3c81'1494U, 1U, 0U, 1U}, - // x = 0x1.99ff0ap-7, exp10m1f(x) = 0x1.dee416p-6 (RZ) - {0x3c4c'ff85U, 0x3cef'720bU, 1U, 0U, 0U}, - // x = 0x1.75ea14p-6, exp10m1f(x) = 0x1.b9ff16p-5 (RZ) - {0x3cba'f50aU, 0x3d5c'ff8bU, 1U, 0U, 0U}, - // x = 0x1.f81b64p-6, exp10m1f(x) = 0x1.2cb6bcp-4 (RZ) - {0x3cfc'0db2U, 0x3d96'5b5eU, 1U, 0U, 0U}, - // x = 0x1.fafecp+3, exp10m1f(x) = 0x1.8c880ap+52 (RZ) - {0x417d'7f60U, 0x59c6'4405U, 1U, 0U, 0U}, - // x = -0x1.3bf094p-8, exp10m1f(x) = -0x1.69ba4ap-7 (RZ) - {0xbb9d'f84aU, 0xbc34'dd25U, 0U, 1U, 0U}, - // x = -0x1.4558bcp-8, exp10m1f(x) = -0x1.746fb8p-7 (RZ) - {0xbba2'ac5eU, 0xbc3a'37dcU, 0U, 1U, 1U}, - // x = -0x1.4bb43p-8, exp10m1f(x) = -0x1.7babe4p-7 (RZ) - {0xbba5'da18U, 0xbc3d'd5f2U, 0U, 1U, 1U}, - // x = -0x1.776cc8p-8, exp10m1f(x) = -0x1.ad62c4p-7 (RZ) - {0xbbbb'b664U, 0xbc56'b162U, 0U, 1U, 0U}, - // x = -0x1.f024cp-8, exp10m1f(x) = -0x1.1b20d6p-6 (RZ) - {0xbbf8'1260U, 0xbc8d'906bU, 0U, 1U, 1U}, - // x = -0x1.f510eep-8, exp10m1f(x) = -0x1.1de9aap-6 (RZ) - {0xbbfa'8877U, 0xbc8e'f4d5U, 0U, 1U, 0U}, - // x = -0x1.0b43c4p-7, exp10m1f(x) = -0x1.30d418p-6 (RZ) - {0xbc05'a1e2U, 0xbc98'6a0cU, 0U, 1U, 0U}, - // x = -0x1.245ee4p-7, exp10m1f(x) = -0x1.4d2b86p-6 (RZ) - {0xbc12'2f72U, 0xbca6'95c3U, 0U, 1U, 0U}, - // x = -0x1.f9f2dap-7, exp10m1f(x) = -0x1.1e2186p-5 (RZ) - {0xbc7c'f96dU, 0xbd0f'10c3U, 0U, 1U, 0U}, - // x = -0x1.08e42p-6, exp10m1f(x) = -0x1.2b5c4p-5 (RZ) - {0xbc84'7210U, 0xbd15'ae20U, 0U, 1U, 1U}, - // x = -0x1.0cdc44p-5, exp10m1f(x) = -0x1.2a2152p-4 (RZ) - {0xbd06'6e22U, 0xbd95'10a9U, 0U, 1U, 1U}, - // x = -0x1.ca4322p-5, exp10m1f(x) = -0x1.ef073p-4 (RZ) - {0xbd65'2191U, 0xbdf7'8398U, 0U, 1U, 1U}, - }}; -#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS - -LLVM_LIBC_FUNCTION(float, exp10m1f, (float x)) { - using FPBits = fputil::FPBits; - FPBits xbits(x); - - uint32_t x_u = xbits.uintval(); - uint32_t x_abs = x_u & 
0x7fff'ffffU; - - // When x >= log10(2^128), or x is nan - if (LIBC_UNLIKELY(xbits.is_pos() && x_u >= 0x421a'209bU)) { - if (xbits.is_finite()) { - int rounding = fputil::quick_get_round(); - if (rounding == FE_DOWNWARD || rounding == FE_TOWARDZERO) - return FPBits::max_normal().get_val(); - - fputil::set_errno_if_required(ERANGE); - fputil::raise_except_if_required(FE_OVERFLOW); - } - - // x >= log10(2^128) and 10^x - 1 rounds to +inf, or x is +inf or nan - return x + FPBits::inf().get_val(); - } - - // When |x| <= log10(2) * 2^(-6) - if (LIBC_UNLIKELY(x_abs <= 0x3b9a'209bU)) { -#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS - if (auto r = EXP10M1F_EXCEPTS_LO.lookup(x_u); LIBC_UNLIKELY(r.has_value())) - return r.value(); -#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS - - double dx = x; - double dx_sq = dx * dx; - double c0 = dx * Exp10Base::COEFFS[0]; - double c1 = - fputil::multiply_add(dx, Exp10Base::COEFFS[2], Exp10Base::COEFFS[1]); - double c2 = - fputil::multiply_add(dx, Exp10Base::COEFFS[4], Exp10Base::COEFFS[3]); - // 10^dx - 1 ~ (1 + COEFFS[0] * dx + ... + COEFFS[4] * dx^5) - 1 - // = COEFFS[0] * dx + ... + COEFFS[4] * dx^5 - return static_cast(fputil::polyeval(dx_sq, c0, c1, c2)); - } - - // When x <= log10(2^-25), or x is nan - if (LIBC_UNLIKELY(x_u >= 0xc0f0d2f1)) { - // exp10m1(-inf) = -1 - if (xbits.is_inf()) - return -1.0f; - // exp10m1(nan) = nan - if (xbits.is_nan()) - return x; - - int rounding = fputil::quick_get_round(); - if (rounding == FE_UPWARD || rounding == FE_TOWARDZERO || - (rounding == FE_TONEAREST && x_u == 0xc0f0d2f1)) - return -0x1.ffff'fep-1f; // -1.0f + 0x1.0p-24f - - fputil::set_errno_if_required(ERANGE); - fputil::raise_except_if_required(FE_UNDERFLOW); - return -1.0f; - } - - // Exact outputs when x = 1, 2, ..., 10. - // Quick check mask: 0x800f'ffffU = ~(bits of 1.0f | ... 
| bits of 10.0f) - if (LIBC_UNLIKELY((x_u & 0x800f'ffffU) == 0)) { - switch (x_u) { - case 0x3f800000U: // x = 1.0f - return 9.0f; - case 0x40000000U: // x = 2.0f - return 99.0f; - case 0x40400000U: // x = 3.0f - return 999.0f; - case 0x40800000U: // x = 4.0f - return 9'999.0f; - case 0x40a00000U: // x = 5.0f - return 99'999.0f; - case 0x40c00000U: // x = 6.0f - return 999'999.0f; - case 0x40e00000U: // x = 7.0f - return 9'999'999.0f; - case 0x41000000U: { // x = 8.0f - int rounding = fputil::quick_get_round(); - if (rounding == FE_UPWARD || rounding == FE_TONEAREST) - return 100'000'000.0f; - return 99'999'992.0f; - } - case 0x41100000U: { // x = 9.0f - int rounding = fputil::quick_get_round(); - if (rounding == FE_UPWARD || rounding == FE_TONEAREST) - return 1'000'000'000.0f; - return 999'999'936.0f; - } - case 0x41200000U: { // x = 10.0f - int rounding = fputil::quick_get_round(); - if (rounding == FE_UPWARD || rounding == FE_TONEAREST) - return 10'000'000'000.0f; - return 9'999'998'976.0f; - } - } - } - -#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS - if (auto r = EXP10M1F_EXCEPTS_HI.lookup(x_u); LIBC_UNLIKELY(r.has_value())) - return r.value(); -#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS - - // Range reduction: 10^x = 2^(mid + hi) * 10^lo - // rr = (2^(mid + hi), lo) - auto rr = exp_b_range_reduc(x); - - // The low part is approximated by a degree-5 minimax polynomial. - // 10^lo ~ 1 + COEFFS[0] * lo + ... 
+ COEFFS[4] * lo^5 - double lo_sq = rr.lo * rr.lo; - double c0 = fputil::multiply_add(rr.lo, Exp10Base::COEFFS[0], 1.0); - double c1 = - fputil::multiply_add(rr.lo, Exp10Base::COEFFS[2], Exp10Base::COEFFS[1]); - double c2 = - fputil::multiply_add(rr.lo, Exp10Base::COEFFS[4], Exp10Base::COEFFS[3]); - double exp10_lo = fputil::polyeval(lo_sq, c0, c1, c2); - // 10^x - 1 = 2^(mid + hi) * 10^lo - 1 - // ~ mh * exp10_lo - 1 - return static_cast(fputil::multiply_add(exp10_lo, rr.mh, -1.0)); -} +LLVM_LIBC_FUNCTION(float, exp10m1f, (float x)) { return math::exp10m1f(x); } } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/string/memory_utils/generic/inline_strlen.h b/libc/src/string/memory_utils/generic/inline_strlen.h index 5e553e301d4da..d7435afb03719 100644 --- a/libc/src/string/memory_utils/generic/inline_strlen.h +++ b/libc/src/string/memory_utils/generic/inline_strlen.h @@ -32,14 +32,15 @@ string_length(const char *src) { const cpp::simd *aligned = reinterpret_cast *>( __builtin_align_down(src, alignment)); - cpp::simd chars = cpp::load_aligned>(aligned); + cpp::simd chars = cpp::load>(aligned, /*aligned=*/true); cpp::simd_mask mask = chars == null_byte; size_t offset = src - reinterpret_cast(aligned); if (cpp::any_of(shift_mask(mask, offset))) return cpp::find_first_set(shift_mask(mask, offset)); for (;;) { - cpp::simd chars = cpp::load_aligned>(++aligned); + cpp::simd chars = cpp::load>(++aligned, + /*aligned=*/true); cpp::simd_mask mask = chars == null_byte; if (cpp::any_of(mask)) return (reinterpret_cast(aligned) - src) + diff --git a/libc/test/shared/CMakeLists.txt b/libc/test/shared/CMakeLists.txt index 9f3e9838d6b78..13a0aae5d4c67 100644 --- a/libc/test/shared/CMakeLists.txt +++ b/libc/test/shared/CMakeLists.txt @@ -36,6 +36,7 @@ add_fp_unittest( libc.src.__support.math.cospif libc.src.__support.math.cospif16 libc.src.__support.math.dsqrtl + libc.src.__support.math.exp10m1f libc.src.__support.math.erff libc.src.__support.math.exp 
libc.src.__support.math.exp10 diff --git a/libc/test/shared/shared_math_test.cpp b/libc/test/shared/shared_math_test.cpp index 655e7fb48230e..25bf5ad8ae411 100644 --- a/libc/test/shared/shared_math_test.cpp +++ b/libc/test/shared/shared_math_test.cpp @@ -57,6 +57,7 @@ TEST(LlvmLibcSharedMathTest, AllFloat) { EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::cosf(0.0f)); EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::coshf(0.0f)); EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::cospif(0.0f)); + EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::exp10m1f(0.0f)); EXPECT_FP_EQ(0x0p+0f, LIBC_NAMESPACE::shared::erff(0.0f)); EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::exp10f(0.0f)); EXPECT_FP_EQ(0x1p+0f, LIBC_NAMESPACE::shared::expf(0.0f)); diff --git a/libc/test/src/__support/CPP/simd_test.cpp b/libc/test/src/__support/CPP/simd_test.cpp index c8f34df8ab028..8bead8461d649 100644 --- a/libc/test/src/__support/CPP/simd_test.cpp +++ b/libc/test/src/__support/CPP/simd_test.cpp @@ -86,3 +86,65 @@ TEST(LlvmLibcSIMDTest, SplitConcat) { cpp::simd n = cpp::concat(c, c, c, c, c, c, c, c); EXPECT_TRUE(cpp::all_of(n == ~0)); } + +TEST(LlvmLibcSIMDTest, LoadStore) { + constexpr size_t SIZE = cpp::simd_size_v>; + alignas(alignof(cpp::simd)) int buf[SIZE]; + + cpp::simd v1 = cpp::splat(1); + cpp::store(v1, buf); + cpp::simd v2 = cpp::load>(buf); + + EXPECT_TRUE(cpp::all_of(v1 == 1)); + EXPECT_TRUE(cpp::all_of(v2 == 1)); + + cpp::simd v3 = cpp::splat(2); + cpp::store(v3, buf, /*aligned=*/true); + cpp::simd v4 = cpp::load>(buf, /*aligned=*/true); + + EXPECT_TRUE(cpp::all_of(v3 == 2)); + EXPECT_TRUE(cpp::all_of(v4 == 2)); +} + +TEST(LlvmLibcSIMDTest, MaskedLoadStore) { + constexpr size_t SIZE = cpp::simd_size_v>; + alignas(alignof(cpp::simd)) int buf[SIZE] = {0}; + + cpp::simd mask = cpp::iota(0) % 2 == 0; + cpp::simd v1 = cpp::splat(1); + + cpp::store_masked>(mask, v1, buf); + cpp::simd v2 = cpp::load_masked>(mask, buf); + + EXPECT_TRUE(cpp::all_of((v2 == 1) == mask)); +} + +TEST(LlvmLibcSIMDTest, 
GatherScatter) { + constexpr int SIZE = cpp::simd_size_v>; + alignas(alignof(cpp::simd)) int buf[SIZE]; + + cpp::simd mask = cpp::iota(1); + cpp::simd idx = cpp::iota(0); + cpp::simd v1 = cpp::splat(1); + + cpp::scatter>(mask, idx, v1, buf); + cpp::simd v2 = cpp::gather>(mask, idx, buf); + + EXPECT_TRUE(cpp::all_of(v1 == 1)); + EXPECT_TRUE(cpp::all_of(v2 == 1)); +} + +TEST(LlvmLibcSIMDTest, MaskedCompressExpand) { + constexpr size_t SIZE = cpp::simd_size_v>; + alignas(alignof(cpp::simd)) int buf[SIZE] = {0}; + + cpp::simd mask_expand = cpp::iota(0) % 2 == 0; + cpp::simd mask_compress = 1; + + cpp::simd v1 = cpp::iota(0); + + cpp::compress>(mask_compress, v1, buf); + cpp::simd v2 = cpp::expand>(mask_expand, buf); + + EXPECT_TRUE(cpp::all_of(!mask_expand || v2 <= SIZE / 2)); +} diff --git a/libclc/Maintainers.md b/libclc/Maintainers.md index ac869b6945db5..695695c00be56 100644 --- a/libclc/Maintainers.md +++ b/libclc/Maintainers.md @@ -10,8 +10,14 @@ The following people are the active maintainers for the project. Please reach out to them for code reviews, questions about their area of expertise, or other assistance. -Fraser Cormack \ -fraser@codeplay.com (email), [frasercrmck](https://github.com/frasercrmck) (GitHub) - Tom Stellard \ tstellar@redhat.com (email), [tstellar](https://github.com/tstellar) (GitHub) + +## Inactive Maintainers + +The following people have graciously spent time performing maintainership +responsibilities but are no longer active in that role. Thank you for all your +help with the success of the project! 
+ +Fraser Cormack \ +frasercrmck@pm.me (email), [frasercrmck](https://github.com/frasercrmck) (GitHub) diff --git a/libcxx/include/__flat_map/flat_map.h b/libcxx/include/__flat_map/flat_map.h index 31ba9bc0b91ac..7bb235ba76503 100644 --- a/libcxx/include/__flat_map/flat_map.h +++ b/libcxx/include/__flat_map/flat_map.h @@ -29,7 +29,6 @@ #include <__flat_map/key_value_iterator.h> #include <__flat_map/sorted_unique.h> #include <__flat_map/utils.h> -#include <__functional/invoke.h> #include <__functional/is_transparent.h> #include <__functional/operations.h> #include <__fwd/memory.h> @@ -48,7 +47,6 @@ #include <__ranges/container_compatible_range.h> #include <__ranges/drop_view.h> #include <__ranges/from_range.h> -#include <__ranges/ref_view.h> #include <__ranges/size.h> #include <__ranges/subrange.h> #include <__ranges/zip_view.h> diff --git a/libcxx/include/__flat_map/flat_multimap.h b/libcxx/include/__flat_map/flat_multimap.h index abaacf9e3cda3..96d945405cffe 100644 --- a/libcxx/include/__flat_map/flat_multimap.h +++ b/libcxx/include/__flat_map/flat_multimap.h @@ -22,7 +22,6 @@ #include <__algorithm/upper_bound.h> #include <__assert> #include <__compare/synth_three_way.h> -#include <__concepts/convertible_to.h> #include <__concepts/swappable.h> #include <__config> #include <__cstddef/byte.h> @@ -30,7 +29,6 @@ #include <__flat_map/key_value_iterator.h> #include <__flat_map/sorted_equivalent.h> #include <__flat_map/utils.h> -#include <__functional/invoke.h> #include <__functional/is_transparent.h> #include <__functional/operations.h> #include <__fwd/vector.h> @@ -47,7 +45,6 @@ #include <__ranges/container_compatible_range.h> #include <__ranges/drop_view.h> #include <__ranges/from_range.h> -#include <__ranges/ref_view.h> #include <__ranges/size.h> #include <__ranges/subrange.h> #include <__ranges/zip_view.h> @@ -57,14 +54,12 @@ #include <__type_traits/is_allocator.h> #include <__type_traits/is_nothrow_constructible.h> #include <__type_traits/is_same.h> -#include 
<__type_traits/maybe_const.h> #include <__utility/exception_guard.h> #include <__utility/move.h> #include <__utility/pair.h> #include <__utility/scope_guard.h> #include <__vector/vector.h> #include -#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/libcxx/include/__flat_map/key_value_iterator.h b/libcxx/include/__flat_map/key_value_iterator.h index d04a23d1f8606..795651a07937b 100644 --- a/libcxx/include/__flat_map/key_value_iterator.h +++ b/libcxx/include/__flat_map/key_value_iterator.h @@ -20,7 +20,6 @@ #include <__type_traits/conditional.h> #include <__utility/forward.h> #include <__utility/move.h> -#include <__utility/pair.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/libcxx/include/__flat_set/flat_multiset.h b/libcxx/include/__flat_set/flat_multiset.h index 65f4161a8c34c..b1a4917659c49 100644 --- a/libcxx/include/__flat_set/flat_multiset.h +++ b/libcxx/include/__flat_set/flat_multiset.h @@ -13,54 +13,40 @@ #include <__algorithm/equal_range.h> #include <__algorithm/lexicographical_compare_three_way.h> #include <__algorithm/lower_bound.h> -#include <__algorithm/min.h> #include <__algorithm/ranges_equal.h> #include <__algorithm/ranges_inplace_merge.h> #include <__algorithm/ranges_is_sorted.h> #include <__algorithm/ranges_sort.h> -#include <__algorithm/ranges_unique.h> #include <__algorithm/remove_if.h> #include <__algorithm/upper_bound.h> #include <__assert> #include <__compare/synth_three_way.h> -#include <__concepts/convertible_to.h> #include <__concepts/swappable.h> #include <__config> -#include <__cstddef/byte.h> -#include <__cstddef/ptrdiff_t.h> -#include <__flat_map/key_value_iterator.h> #include <__flat_map/sorted_equivalent.h> #include <__flat_set/ra_iterator.h> #include <__flat_set/utils.h> -#include <__functional/invoke.h> #include <__functional/is_transparent.h> #include <__functional/operations.h> #include <__fwd/vector.h> #include <__iterator/concepts.h> 
-#include <__iterator/distance.h> #include <__iterator/iterator_traits.h> #include <__iterator/prev.h> -#include <__iterator/ranges_iterator_traits.h> #include <__iterator/reverse_iterator.h> #include <__memory/allocator_traits.h> #include <__memory/uses_allocator.h> #include <__memory/uses_allocator_construction.h> -#include <__ranges/access.h> #include <__ranges/concepts.h> #include <__ranges/container_compatible_range.h> #include <__ranges/drop_view.h> #include <__ranges/from_range.h> -#include <__ranges/ref_view.h> #include <__ranges/size.h> #include <__ranges/subrange.h> -#include <__ranges/zip_view.h> -#include <__type_traits/conjunction.h> #include <__type_traits/container_traits.h> #include <__type_traits/invoke.h> #include <__type_traits/is_allocator.h> #include <__type_traits/is_nothrow_constructible.h> #include <__type_traits/is_same.h> -#include <__type_traits/maybe_const.h> #include <__utility/as_const.h> #include <__utility/exception_guard.h> #include <__utility/move.h> diff --git a/libcxx/include/__flat_set/flat_set.h b/libcxx/include/__flat_set/flat_set.h index cc788bda544de..5fa1f2d8acb9b 100644 --- a/libcxx/include/__flat_set/flat_set.h +++ b/libcxx/include/__flat_set/flat_set.h @@ -12,7 +12,6 @@ #include <__algorithm/lexicographical_compare_three_way.h> #include <__algorithm/lower_bound.h> -#include <__algorithm/min.h> #include <__algorithm/ranges_adjacent_find.h> #include <__algorithm/ranges_equal.h> #include <__algorithm/ranges_inplace_merge.h> @@ -24,20 +23,16 @@ #include <__compare/synth_three_way.h> #include <__concepts/swappable.h> #include <__config> -#include <__cstddef/ptrdiff_t.h> #include <__flat_map/sorted_unique.h> #include <__flat_set/ra_iterator.h> #include <__flat_set/utils.h> -#include <__functional/invoke.h> #include <__functional/is_transparent.h> #include <__functional/operations.h> #include <__fwd/vector.h> #include <__iterator/concepts.h> -#include <__iterator/distance.h> #include <__iterator/iterator_traits.h> #include 
<__iterator/next.h> #include <__iterator/prev.h> -#include <__iterator/ranges_iterator_traits.h> #include <__iterator/reverse_iterator.h> #include <__memory/allocator_traits.h> #include <__memory/uses_allocator.h> @@ -47,10 +42,7 @@ #include <__ranges/container_compatible_range.h> #include <__ranges/drop_view.h> #include <__ranges/from_range.h> -#include <__ranges/ref_view.h> #include <__ranges/size.h> -#include <__ranges/subrange.h> -#include <__type_traits/conjunction.h> #include <__type_traits/container_traits.h> #include <__type_traits/invoke.h> #include <__type_traits/is_allocator.h> diff --git a/libcxx/include/__ranges/join_view.h b/libcxx/include/__ranges/join_view.h index 327b349f476a7..364f056d8d2cf 100644 --- a/libcxx/include/__ranges/join_view.h +++ b/libcxx/include/__ranges/join_view.h @@ -410,8 +410,13 @@ struct __segmented_iterator_traits<_JoinViewIterator> { static constexpr _LIBCPP_HIDE_FROM_ABI _JoinViewIterator __compose(__segment_iterator __seg_iter, __local_iterator __local_iter) { - return _JoinViewIterator( - std::move(__seg_iter).__get_data(), std::move(__seg_iter).__get_iter(), std::move(__local_iter)); + auto&& __parent = std::move(__seg_iter).__get_data(); + auto&& __outer = std::move(__seg_iter).__get_iter(); + if (__local_iter == ranges::end(*__outer)) { + ++__outer; + return _JoinViewIterator(*__parent, __outer); + } + return _JoinViewIterator(__parent, __outer, std::move(__local_iter)); } }; diff --git a/libcxx/include/module.modulemap.in b/libcxx/include/module.modulemap.in index dc1933324ef79..5e96adc1aaa65 100644 --- a/libcxx/include/module.modulemap.in +++ b/libcxx/include/module.modulemap.in @@ -1847,7 +1847,10 @@ module std [system] { module ranges { module access { header "__ranges/access.h" } - module all { header "__ranges/all.h" } + module all { + header "__ranges/all.h" + export std.ranges.ref_view + } module as_rvalue_view { header "__ranges/as_rvalue_view.h" } module chunk_by_view { header "__ranges/chunk_by_view.h" diff 
--git a/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/ranges.find.pass.cpp b/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/ranges.find.pass.cpp index 5b4abc45b6f4f..3303b4a76f467 100644 --- a/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/ranges.find.pass.cpp +++ b/libcxx/test/std/algorithms/alg.nonmodifying/alg.find/ranges.find.pass.cpp @@ -272,57 +272,100 @@ class Comparable { friend bool operator==(const Comparable& lhs, long long rhs) { return comparable_data[lhs.index_] == rhs; } }; -void test_deque() { - { // empty deque - std::deque data; - assert(std::ranges::find(data, 4) == data.end()); - assert(std::ranges::find(data.begin(), data.end(), 4) == data.end()); - } - - { // single element - match - std::deque data = {4}; - assert(std::ranges::find(data, 4) == data.begin()); - assert(std::ranges::find(data.begin(), data.end(), 4) == data.begin()); - } - - { // single element - no match - std::deque data = {3}; - assert(std::ranges::find(data, 4) == data.end()); - assert(std::ranges::find(data.begin(), data.end(), 4) == data.end()); - } - - // many elements - for (auto size : {2, 3, 1023, 1024, 1025, 2047, 2048, 2049}) { - { // last element match +void test_segmented_iterator_types() { + // Test the optimized find algorithm for types that implement the segment iterator trait + // deque + { + { // empty deque std::deque data; - data.resize(size); - std::fill(data.begin(), data.end(), 3); - data[size - 1] = 4; - assert(std::ranges::find(data, 4) == data.end() - 1); - assert(std::ranges::find(data.begin(), data.end(), 4) == data.end() - 1); + assert(std::ranges::find(data, 4) == data.end()); + assert(std::ranges::find(data.begin(), data.end(), 4) == data.end()); } - { // second-last element match - std::deque data; - data.resize(size); - std::fill(data.begin(), data.end(), 3); - data[size - 2] = 4; - assert(std::ranges::find(data, 4) == data.end() - 2); - assert(std::ranges::find(data.begin(), data.end(), 4) == data.end() - 2); + { // single 
element - match + std::deque data = {4}; + assert(std::ranges::find(data, 4) == data.begin()); + assert(std::ranges::find(data.begin(), data.end(), 4) == data.begin()); } - { // no match - std::deque data; - data.resize(size); - std::fill(data.begin(), data.end(), 3); + { // single element - no match + std::deque data = {3}; assert(std::ranges::find(data, 4) == data.end()); assert(std::ranges::find(data.begin(), data.end(), 4) == data.end()); } + + // many elements + for (auto size : {2, 3, 1023, 1024, 1025, 2047, 2048, 2049}) { + { // last element match + std::deque data; + data.resize(size); + std::fill(data.begin(), data.end(), 3); + data[size - 1] = 4; + assert(std::ranges::find(data, 4) == data.end() - 1); + assert(std::ranges::find(data.begin(), data.end(), 4) == data.end() - 1); + } + + { // second-last element match + std::deque data; + data.resize(size); + std::fill(data.begin(), data.end(), 3); + data[size - 2] = 4; + assert(std::ranges::find(data, 4) == data.end() - 2); + assert(std::ranges::find(data.begin(), data.end(), 4) == data.end() - 2); + } + + { // no match + std::deque data; + data.resize(size); + std::fill(data.begin(), data.end(), 3); + assert(std::ranges::find(data, 4) == data.end()); + assert(std::ranges::find(data.begin(), data.end(), 4) == data.end()); + } + } + } + // join_view ranges adaptor + { + { // single element - match + int data[1][1] = {{4}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::begin(joined)); + } + { // single element - no match + // (reproducer for https://llvm.org/PR158279, where the iterator would never reach the end sentinel) + int data[1][1] = {{3}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::end(joined)); + } + { // several sub-arrays of size 1 - match + int data[3][1] = {{0}, {4}, {0}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::next(std::ranges::begin(joined))); + } + { 
// several sub-arrays of size 2 - match in second element of an array + int data[3][2] = {{0, 0}, {0, 4}, {0, 0}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::next(std::ranges::begin(joined), 3)); + } + { // vector of empty vectors + std::vector> data = {{}, {}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::end(joined)); + } + { // vector of variably sized vectors - match + std::vector> data = {{}, {}, {3, 4}, {}, {}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::next(std::ranges::begin(joined))); + } + { // vector of variably sized vectors - no match + std::vector> data = {{}, {}, {3, 5}, {}, {}}; + auto joined = std::views::join(data); + assert(std::ranges::find(joined, 4) == std::ranges::end(joined)); + } } } int main(int, char**) { - test_deque(); + test_segmented_iterator_types(); test(); static_assert(test()); diff --git a/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/is_implicit_lifetime.pass.cpp b/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/is_implicit_lifetime.pass.cpp index a68f3f40e3647..5264e7700e3d9 100644 --- a/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/is_implicit_lifetime.pass.cpp +++ b/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/is_implicit_lifetime.pass.cpp @@ -17,9 +17,12 @@ #include #include +#include +#include #include #include #include +#include #include "test_macros.h" #include "type_algorithms.h" @@ -216,8 +219,16 @@ constexpr bool test() { // C++ standard library types + // These types are guaranteed to be implicit-lifetime. + test_is_implicit_lifetime>(); + test_is_implicit_lifetime>(); + test_is_implicit_lifetime>(); + +#ifdef _LIBCPP_VERSION + // These types should be implicit-lifetime, but they are not guaranteed to be so. 
test_is_implicit_lifetime>(); test_is_implicit_lifetime>(); +#endif // Standard C23 types diff --git a/libcxx/utils/compare-benchmarks b/libcxx/utils/compare-benchmarks index 43db3d858ee32..18a448ab434c7 100755 --- a/libcxx/utils/compare-benchmarks +++ b/libcxx/utils/compare-benchmarks @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import argparse +import functools import pathlib import re import statistics @@ -62,35 +63,35 @@ def plain_text_comparison(data, metric, baseline_name=None, candidate_name=None) """ Create a tabulated comparison of the baseline and the candidate for the given metric. """ + # Compute additional info in new columns. In text mode, we can assume that we are + # comparing exactly two data sets (suffixed _0 and _1). + data['difference'] = data[f'{metric}_1'] - data[f'{metric}_0'] + data['percent'] = 100 * (data['difference'] / data[f'{metric}_0']) + data = data.replace(numpy.nan, None).sort_values(by='benchmark') # avoid NaNs in tabulate output headers = ['Benchmark', baseline_name, candidate_name, 'Difference', '% Difference'] fmt = (None, '.2f', '.2f', '.2f', '.2f') - table = data[['benchmark', f'{metric}_baseline', f'{metric}_candidate', 'difference', 'percent']].set_index('benchmark') + table = data[['benchmark', f'{metric}_0', f'{metric}_1', 'difference', 'percent']].set_index('benchmark') return tabulate.tabulate(table, headers=headers, floatfmt=fmt, numalign='right') -def create_chart(data, metric, subtitle=None, baseline_name=None, candidate_name=None): +def create_chart(data, metric, subtitle=None, series_names=None): """ - Create a bar chart comparing the given metric between the baseline and the candidate. + Create a bar chart comparing the given metric across the provided series. 
""" - data = data.sort_values(by='benchmark').rename(columns={ - f'{metric}_baseline': baseline_name, - f'{metric}_candidate': candidate_name - }) - figure = plotly.express.bar(data, title=f'{baseline_name} vs {candidate_name}', - subtitle=subtitle, - x='benchmark', y=[baseline_name, candidate_name], barmode='group') + data = data.sort_values(by='benchmark').rename(columns={f'{metric}_{i}': series_names[i] for i in range(len(series_names))}) + title = ' vs '.join(series_names) + figure = plotly.express.bar(data, title=title, subtitle=subtitle, x='benchmark', y=series_names, barmode='group') figure.update_layout(xaxis_title='', yaxis_title='', legend_title='') return figure def main(argv): parser = argparse.ArgumentParser( prog='compare-benchmarks', - description='Compare the results of two sets of benchmarks in LNT format.', + description='Compare the results of multiple sets of benchmarks in LNT format.', epilog='This script depends on the modules listed in `libcxx/utils/requirements.txt`.') - parser.add_argument('baseline', type=argparse.FileType('r'), - help='Path to a LNT format file containing the benchmark results for the baseline.') - parser.add_argument('candidate', type=argparse.FileType('r'), - help='Path to a LNT format file containing the benchmark results for the candidate.') + parser.add_argument('files', type=argparse.FileType('r'), nargs='+', + help='Path to LNT format files containing the benchmark results to compare. In the text format, ' + 'exactly two files must be compared.') parser.add_argument('--output', '-o', type=pathlib.Path, required=False, help='Path of a file where to output the resulting comparison. If the output format is `text`, ' 'default to stdout. If the output format is `chart`, default to a temporary file which is ' @@ -107,43 +108,52 @@ def main(argv): parser.add_argument('--open', action='store_true', help='Whether to automatically open the generated HTML file when finished. 
This option only makes sense ' 'when the output format is `chart`.') - parser.add_argument('--baseline-name', type=str, default='Baseline', - help='Optional name to use for the "baseline" label.') - parser.add_argument('--candidate-name', type=str, default='Candidate', - help='Optional name to use for the "candidate" label.') + parser.add_argument('--series-names', type=str, required=False, + help='Optional comma-delimited list of names to use for the various series. By default, we use ' + 'Baseline and Candidate for two input files, and CandidateN for subsequent inputs.') parser.add_argument('--subtitle', type=str, required=False, help='Optional subtitle to use for the chart. This can be used to help identify the contents of the chart. ' 'This option cannot be used with the plain text output.') args = parser.parse_args(argv) - if args.format == 'text' and args.subtitle is not None: - parser.error('Passing --subtitle makes no sense with --format=text') - - if args.format == 'text' and args.open: - parser.error('Passing --open makes no sense with --format=text') + if args.format == 'text': + if len(args.files) != 2: + parser.error('--format=text requires exactly two input files to compare') + if args.subtitle is not None: + parser.error('Passing --subtitle makes no sense with --format=text') + if args.open: + parser.error('Passing --open makes no sense with --format=text') + + if args.series_names is None: + args.series_names = ['Baseline'] + if len(args.files) == 2: + args.series_names += ['Candidate'] + elif len(args.files) > 2: + args.series_names.extend(f'Candidate{n}' for n in range(1, len(args.files))) + else: + args.series_names = args.series_names.split(',') + if len(args.series_names) != len(args.files): + parser.error(f'Passed incorrect number of series names: got {len(args.series_names)} series names but {len(args.files)} inputs to compare') - baseline = pandas.DataFrame(parse_lnt(args.baseline.readlines())) - candidate = 
pandas.DataFrame(parse_lnt(args.candidate.readlines())) + # Parse the raw LNT data and store each input in a dataframe + lnt_inputs = [parse_lnt(file.readlines()) for file in args.files] + inputs = [pandas.DataFrame(lnt).rename(columns={args.metric: f'{args.metric}_{i}'}) for (i, lnt) in enumerate(lnt_inputs)] - # Join the baseline and the candidate into a single dataframe and add some new columns - data = baseline.merge(candidate, how='outer', on='benchmark', suffixes=('_baseline', '_candidate')) - data['difference'] = data[f'{args.metric}_candidate'] - data[f'{args.metric}_baseline'] - data['percent'] = 100 * (data['difference'] / data[f'{args.metric}_baseline']) + # Join the inputs into a single dataframe + data = functools.reduce(lambda a, b: a.merge(b, how='outer', on='benchmark'), inputs) if args.filter is not None: keeplist = [b for b in data['benchmark'] if re.search(args.filter, b) is not None] data = data[data['benchmark'].isin(keeplist)] if args.format == 'chart': - figure = create_chart(data, args.metric, subtitle=args.subtitle, - baseline_name=args.baseline_name, - candidate_name=args.candidate_name) + figure = create_chart(data, args.metric, subtitle=args.subtitle, series_names=args.series_names) do_open = args.output is None or args.open output = args.output or tempfile.NamedTemporaryFile(suffix='.html').name plotly.io.write_html(figure, file=output, auto_open=do_open) else: - diff = plain_text_comparison(data, args.metric, baseline_name=args.baseline_name, - candidate_name=args.candidate_name) + diff = plain_text_comparison(data, args.metric, baseline_name=args.series_names[0], + candidate_name=args.series_names[1]) diff += '\n' if args.output is not None: with open(args.output, 'w') as out: diff --git a/libcxx/utils/libcxx/test/format.py b/libcxx/utils/libcxx/test/format.py index 5765afec399cf..c9dffd1bb7971 100644 --- a/libcxx/utils/libcxx/test/format.py +++ b/libcxx/utils/libcxx/test/format.py @@ -92,6 +92,7 @@ def parseScript(test, preamble): # 
errors, which doesn't make sense for clang-verify tests because we may want to check # for specific warning diagnostics. _checkBaseSubstitutions(substitutions) + substitutions.append(("%T", tmpDir)) substitutions.append( ("%{build}", "%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe") ) diff --git a/libcxx/utils/visualize-historical b/libcxx/utils/visualize-historical index 00f52f09617bb..ef28e8b17ca4b 100755 --- a/libcxx/utils/visualize-historical +++ b/libcxx/utils/visualize-historical @@ -117,12 +117,12 @@ def create_plot(data, metric, subtitle=None): Create a plot object showing the evolution of each benchmark throughout the given commits for the given metric. """ - data = data.sort_values(by=['date', 'benchmark']) + data = data.sort_values(by=['revlist_order', 'benchmark']) revlist = pandas.unique(data['commit']) # list of all commits in chronological order hover_info = {c: truncate_lines(c.show(), 30, marker='...').replace('\n', '
') for c in revlist} figure = plotly.express.scatter(data, title=f"{revlist[0].shortrev} to {revlist[-1].shortrev}", subtitle=subtitle, - x='date', y=metric, + x='revlist_order', y=metric, symbol='benchmark', color='benchmark', hover_name=[hover_info[c] for c in data['commit']], diff --git a/lld/MachO/SyntheticSections.cpp b/lld/MachO/SyntheticSections.cpp index 979a4ee6d8133..228b84db21c2a 100644 --- a/lld/MachO/SyntheticSections.cpp +++ b/lld/MachO/SyntheticSections.cpp @@ -1687,6 +1687,9 @@ void CStringSection::writeTo(uint8_t *buf) const { void CStringSection::finalizeContents() { uint64_t offset = 0; + // TODO: Call buildCStringPriorities() to support cstring ordering when + // deduplication is off, although this may negatively impact build + // performance. for (CStringInputSection *isec : inputs) { for (const auto &[i, piece] : llvm::enumerate(isec->pieces)) { if (!piece.live) diff --git a/lld/test/ELF/eh-frame-relocation.s b/lld/test/ELF/eh-frame-relocation.s new file mode 100644 index 0000000000000..9c1fe40dba7d3 --- /dev/null +++ b/lld/test/ELF/eh-frame-relocation.s @@ -0,0 +1,29 @@ +# REQUIRES: x86 +## Test that marker relocations are ignored and undefined symbols lead to errors. 
+ +# RUN: rm -rf %t && split-file %s %t && cd %t +# RUN: llvm-mc -filetype=obj -triple=x86_64 a.s -o a.o +# RUN: llvm-mc -filetype=obj -triple=x86_64 abi.s -o abi.o +# RUN: ld.lld a.o abi.o -o a +# RUN: llvm-readelf -s a | FileCheck %s + +# CHECK: 00000000002{{.*}} 0 FUNC GLOBAL DEFAULT [[#]] __gxx_personality_v0 + +# RUN: not ld.lld a.o 2>&1 | FileCheck %s --check-prefix=ERR + +# ERR: error: undefined symbol: __gxx_personality_v0 +# ERR-NEXT: >>> referenced by a.o:(.eh_frame+0x12) + +#--- a.s +.cfi_startproc +.cfi_personality 0, __gxx_personality_v0 + ret +.cfi_endproc + +.section .eh_frame,"a",@unwind +.reloc ., BFD_RELOC_NONE, ignore + +#--- abi.s +.globl __gxx_personality_v0 +.type __gxx_personality_v0, @function +__gxx_personality_v0: diff --git a/lld/test/MachO/ordre-file-cstring.s b/lld/test/MachO/order-file-cstring.s similarity index 100% rename from lld/test/MachO/ordre-file-cstring.s rename to lld/test/MachO/order-file-cstring.s diff --git a/lld/test/wasm/mutable-global-exports.s b/lld/test/wasm/mutable-global-exports.s index 4ffaf0a6cbaf0..1c10e92083b5c 100644 --- a/lld/test/wasm/mutable-global-exports.s +++ b/lld/test/wasm/mutable-global-exports.s @@ -73,6 +73,9 @@ _start: # CHECK-ALL-NEXT: - Name: __wasm_call_ctors # CHECK-ALL-NEXT: Kind: FUNCTION # CHECK-ALL-NEXT: Index: 0 +# CHECK-ALL-NEXT: - Name: __stack_pointer +# CHECK-ALL-NEXT: Kind: GLOBAL +# CHECK-ALL-NEXT: Index: 0 # CHECK-ALL-NEXT: - Name: _start # CHECK-ALL-NEXT: Kind: FUNCTION # CHECK-ALL-NEXT: Index: 1 diff --git a/lld/wasm/Writer.cpp b/lld/wasm/Writer.cpp index 0d36893653110..9a5b56fc52e2f 100644 --- a/lld/wasm/Writer.cpp +++ b/lld/wasm/Writer.cpp @@ -784,6 +784,9 @@ void Writer::calculateExports() { unsigned globalIndex = out.importSec->getNumImportedGlobals() + out.globalSec->numGlobals(); + bool hasMutableGlobals = + out.targetFeaturesSec->features.count("mutable-globals") > 0; + for (Symbol *sym : symtab->symbols()) { if (!sym->isExported()) continue; @@ -801,7 +804,8 @@ void 
Writer::calculateExports() { } export_ = {name, WASM_EXTERNAL_FUNCTION, f->getExportedFunctionIndex()}; } else if (auto *g = dyn_cast(sym)) { - if (g->getGlobalType()->Mutable && !g->getFile() && !g->forceExport) { + if (!hasMutableGlobals && g->getGlobalType()->Mutable && !g->getFile() && + !g->isExportedExplicit()) { // Avoid exporting mutable globals are linker synthesized (e.g. // __stack_pointer or __tls_base) unless they are explicitly exported // from the command line. diff --git a/lldb/packages/Python/lldbsuite/test/cpu_feature.py b/lldb/packages/Python/lldbsuite/test/cpu_feature.py index 3f43cbba130c0..b46a5acc596f0 100644 --- a/lldb/packages/Python/lldbsuite/test/cpu_feature.py +++ b/lldb/packages/Python/lldbsuite/test/cpu_feature.py @@ -39,9 +39,13 @@ def _is_supported_linux(self, cmd_runner): if err.Fail() or retcode != 0: return output, False - # FIXME: simple substring match, e.g., test for 'sme' will be true if - # 'sme2' or 'smefa64' is present - return None, (self.cpu_info_flag in output) + # Assume that every processor presents the same features. + # Look for the first "Features: ...." line. Features are space separated. 
+ if m := re.search(r"Features\s*: (.*)\n", output): + features = m.group(1).split() + return None, (self.cpu_info_flag in features) + + return 'No "Features:" line found in /proc/cpuinfo', False def _is_supported_darwin(self, cmd_runner): if not self.sysctl_key: diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ModuleDependencyCollector.h b/lldb/source/Plugins/ExpressionParser/Clang/ModuleDependencyCollector.h index 4fe727460fdb9..dcba0d9c34962 100644 --- a/lldb/source/Plugins/ExpressionParser/Clang/ModuleDependencyCollector.h +++ b/lldb/source/Plugins/ExpressionParser/Clang/ModuleDependencyCollector.h @@ -19,8 +19,8 @@ class ModuleDependencyCollectorAdaptor public: ModuleDependencyCollectorAdaptor( std::shared_ptr file_collector) - : clang::ModuleDependencyCollector(""), m_file_collector(file_collector) { - } + : clang::ModuleDependencyCollector("", llvm::vfs::getRealFileSystem()), + m_file_collector(file_collector) {} void addFile(llvm::StringRef Filename, llvm::StringRef FileDst = {}) override { diff --git a/lldb/test/Shell/ObjectFile/ELF/elf-no-shdrs-pt-notes.yaml b/lldb/test/Shell/ObjectFile/ELF/elf-no-shdrs-pt-notes.yaml index 62562f6c5f7bf..1e9c5dfaeab1b 100644 --- a/lldb/test/Shell/ObjectFile/ELF/elf-no-shdrs-pt-notes.yaml +++ b/lldb/test/Shell/ObjectFile/ELF/elf-no-shdrs-pt-notes.yaml @@ -10,8 +10,8 @@ # RUN: -o "image list" \ # RUN: | FileCheck %s -# CHECK: Current executable set to '{{.*/tools/lldb/test/Shell/ObjectFile/ELF/Output/elf-no-shdrs-pt-notes.yaml.tmp}}' (x86_64). -# CHECK: [ 0] 7F1F56D6-7DBB-17BA-C9A3-4417DB52F097-2548414F 0x0000000000000000 {{.*/tools/lldb/test/Shell/ObjectFile/ELF/Output/elf-no-shdrs-pt-notes.yaml.tmp}} +# CHECK: Current executable set to '{{.*}}elf-no-shdrs-pt-notes.yaml.tmp' (x86_64). 
+# CHECK: [ 0] 7F1F56D6-7DBB-17BA-C9A3-4417DB52F097-2548414F 0x0000000000000000 {{.*}}elf-no-shdrs-pt-notes.yaml.tmp --- !ELF FileHeader: diff --git a/llvm/docs/BugLifeCycle.rst b/llvm/docs/BugLifeCycle.rst index 9bf13e64ed56e..1215af9e47e08 100644 --- a/llvm/docs/BugLifeCycle.rst +++ b/llvm/docs/BugLifeCycle.rst @@ -16,7 +16,7 @@ consistency helps reporters, developers and others to gain a better understanding of what a particular bug state actually means and what to expect might happen next. -At the same time, we aim to not over-specify the life cycle of bugs in +At the same time, we aim not to over-specify the life cycle of bugs in `the LLVM Bug Tracking System `_, as the overall goal is to make it easier to work with and understand the bug reports. @@ -39,7 +39,7 @@ use, needs to be maintained. See the following for details: Reporting bugs ============== -See :doc:`HowToSubmitABug` on further details on how to submit good bug reports. +See :doc:`HowToSubmitABug` for further details on how to submit good bug reports. You can apply `labels `_ to the bug to provide extra information to make the bug easier to discover, such @@ -50,9 +50,9 @@ as a label for the part of the project the bug pertains to. Triaging bugs ============= -Open bugs that have not been marked with the ``confirmed`` label are bugs that +Open bugs that have not been marked with the ``confirmed`` label still need to be triaged. When triage is complete, the ``confirmed`` label -should be added along with any other labels that help to classify the report, +should be added along with any other labels that help classify the report, unless the issue is being :ref:`closed`. The goal of triaging a bug is to make sure a newly reported bug ends up in a @@ -124,13 +124,13 @@ Examples of reasons for resolving are: ``duplicate`` label with a comment pointing to the issue it duplicates. 
* If there is a sound reason for not fixing the issue (difficulty, ABI, open - research questions, etc), add the ``wontfix`` label and a comment explaining + research questions, etc.), add the ``wontfix`` label and a comment explaining why no changes are expected. * If there is a specific and plausible reason to think that a given bug is otherwise inapplicable or obsolete. One example is an open bug that doesn't contain enough information to clearly understand the problem being reported - (e.g. not reproducible). It is fine to close such a bug, adding with the + (e.g., not reproducible). It is fine to close such a bug, adding the ``worksforme`` label and leaving a comment to encourage the reporter to reopen the bug with more information if it's still reproducible for them. @@ -140,7 +140,7 @@ Examples of reasons for resolving are: Maintenance of metadata ======================= -Project member with write access to the project can create new labels, but we +Project members with write access to the project can create new labels, but we discourage adding ad hoc labels because we want to control the proliferation of labels and avoid single-use labels. If you would like a new label added, please open an issue asking to create an issue label and add the ``infrastructure`` diff --git a/llvm/docs/BuildingADistribution.rst b/llvm/docs/BuildingADistribution.rst index 10e571cdea3f9..81ed8b8723a26 100644 --- a/llvm/docs/BuildingADistribution.rst +++ b/llvm/docs/BuildingADistribution.rst @@ -13,8 +13,8 @@ combination of its sub-project tools for distribution. This document covers useful features of the LLVM build system as well as best practices and general information about packaging LLVM. -If you are new to CMake you may find the :doc:`CMake` or :doc:`CMakePrimer` -documentation useful. Some of the things covered in this document are the inner +If you are new to CMake, you may find the :doc:`CMake` or :doc:`CMakePrimer` +documentation useful. 
This document covers some of the inner workings of the builds described in the :doc:`AdvancedBuilds` document. General Distribution Guidance @@ -27,7 +27,7 @@ compiler. This is done so that the compiler you distribute benefits from all the bug fixes, performance optimizations and general improvements provided by the new compiler. -In deciding how to build your distribution there are a few trade-offs that you +In deciding how to build your distribution, there are a few trade-offs that you will need to evaluate. The big two are: #. Compile time of the distribution against performance of the built compiler @@ -41,8 +41,8 @@ opportunity for the compiler to optimize. The guidance for minimizing distribution size is to dynamically link LLVM and Clang libraries into the tools to reduce code duplication. This will come at a -substantial performance penalty to the generated binary both because it reduces -optimization opportunity, and because dynamic linking requires resolving symbols +substantial performance penalty to the generated binary, both because it reduces +optimization opportunities and because dynamic linking requires resolving symbols at process launch time, which can be very slow for C++ code. .. _shared_libs: @@ -76,7 +76,7 @@ LLVM testing tools. Alternatively the ``install-distribution`` target, which is recommended for building distributions, only installs specific parts of LLVM as specified at configuration time by *LLVM_DISTRIBUTION_COMPONENTS*. -Additionally by default the ``install`` target will install the LLVM testing +Additionally, by default, the ``install`` target will install the LLVM testing tools as the public tools. This can be changed well by setting *LLVM_INSTALL_TOOLCHAIN_ONLY* to ``On``. 
The LLVM tools are intended for development and testing of LLVM, and should only be included in distributions diff --git a/llvm/docs/CommandGuide/lit.rst b/llvm/docs/CommandGuide/lit.rst index 359e0c3e81d0e..6a721ebf9cad0 100644 --- a/llvm/docs/CommandGuide/lit.rst +++ b/llvm/docs/CommandGuide/lit.rst @@ -630,13 +630,11 @@ TestRunner.py: %{fs-sep} file system path separator %t temporary file name unique to the test %basename_t The last path component of %t but without the ``.tmp`` extension (deprecated, use ``%{t:stem}`` instead) - %T parent directory of %t (not unique, deprecated, do not use) %% % %/s %s but ``\`` is replaced by ``/`` %/S %S but ``\`` is replaced by ``/`` %/p %p but ``\`` is replaced by ``/`` %/t %t but ``\`` is replaced by ``/`` - %/T %T but ``\`` is replaced by ``/`` %{s:basename} The last path component of %s %{t:stem} The last path component of %t but without the ``.tmp`` extension (alias for %basename_t) %{s:real} %s after expanding all symbolic links and substitute drives @@ -648,12 +646,10 @@ TestRunner.py: %{/S:real} %/S after expanding all symbolic links and substitute drives %{/p:real} %/p after expanding all symbolic links and substitute drives %{/t:real} %/t after expanding all symbolic links and substitute drives - %{/T:real} %/T after expanding all symbolic links and substitute drives %{/s:regex_replacement} %/s but escaped for use in the replacement of a ``s@@@`` command in sed %{/S:regex_replacement} %/S but escaped for use in the replacement of a ``s@@@`` command in sed %{/p:regex_replacement} %/p but escaped for use in the replacement of a ``s@@@`` command in sed %{/t:regex_replacement} %/t but escaped for use in the replacement of a ``s@@@`` command in sed - %{/T:regex_replacement} %/T but escaped for use in the replacement of a ``s@@@`` command in sed %:s On Windows, %/s but a ``:`` is removed if its the second character. Otherwise, %s but with a single leading ``/`` removed. 
%:S On Windows, %/S but a ``:`` is removed if its the second character. @@ -662,8 +658,6 @@ TestRunner.py: Otherwise, %p but with a single leading ``/`` removed. %:t On Windows, %/t but a ``:`` is removed if its the second character. Otherwise, %t but with a single leading ``/`` removed. - %:T On Windows, %/T but a ``:`` is removed if its the second character. - Otherwise, %T but with a single leading ``/`` removed. %{readfile:} Reads the file specified. ======================= ============== diff --git a/llvm/docs/FuzzingLLVM.rst b/llvm/docs/FuzzingLLVM.rst index a0355d7014c8d..76eb4288a1f2c 100644 --- a/llvm/docs/FuzzingLLVM.rst +++ b/llvm/docs/FuzzingLLVM.rst @@ -33,7 +33,7 @@ clang-proto-fuzzer A |protobuf fuzzer| that compiles valid C++ programs generated from a protobuf class that describes a subset of the C++ language. -This fuzzer accepts clang command line options after `ignore_remaining_args=1`. +This fuzzer accepts clang command-line options after `ignore_remaining_args=1`. For example, the following command will fuzz clang with a higher optimization level: @@ -106,7 +106,7 @@ llvm-opt-fuzzer A |LLVM IR fuzzer| aimed at finding bugs in optimization passes. -It receives optimization pipeline and runs it for each fuzzer input. +It receives an optimization pipeline and runs it for each fuzzer input. Interface of this fuzzer almost directly mirrors ``llvm-isel-fuzzer``. Both ``mtriple`` and ``passes`` arguments are required. Passes are specified in a @@ -117,7 +117,7 @@ this format in the doxygen for ``PassBuilder::parsePassPipeline``. % bin/llvm-opt-fuzzer -ignore_remaining_args=1 -mtriple x86_64 -passes instcombine -Similarly to the ``llvm-isel-fuzzer`` arguments in some predefined configurations +Similarly to the ``llvm-isel-fuzzer``, arguments in some predefined configurations might be embedded directly into the binary file name: .. code-block:: shell @@ -176,7 +176,7 @@ mutations that a fuzzer in LLVM might want. 
Generic Random Fuzzing ---------------------- -The most basic form of input mutation is to use the built in mutators of +The most basic form of input mutation is to use the built-in mutators of LibFuzzer. These simply treat the input corpus as a bag of bits and make random mutations. This type of fuzzer is good for stressing the surface layers of a program, and is good at testing things like lexers, parsers, or binary @@ -244,7 +244,7 @@ by adding the following two flags to your CMake invocation: to avoid building the sanitizers themselves with sanitizers enabled. .. note:: You may run into issues if you build with BFD ld, which is the - default linker on many unix systems. These issues are being tracked + default linker on many Unix systems. These issues are being tracked in https://llvm.org/PR34636. Continuously Running and Finding Bugs @@ -280,6 +280,6 @@ your fuzzer can be built and tested when not built against libFuzzer. There is also some handling of the CMake config for fuzzers, where you should use the ``add_llvm_fuzzer`` to set up fuzzer targets. This function works -similarly to functions such as ``add_llvm_tool``, but they take care of linking +similarly to functions such as ``add_llvm_tool``, but it takes care of linking to LibFuzzer when appropriate and can be passed the ``DUMMY_MAIN`` argument to enable standalone testing. diff --git a/llvm/docs/GettingInvolved.rst b/llvm/docs/GettingInvolved.rst index d87a8bd81cc7b..72716fa667487 100644 --- a/llvm/docs/GettingInvolved.rst +++ b/llvm/docs/GettingInvolved.rst @@ -208,7 +208,7 @@ what to add to your calendar invite. 
- 3rd Tuesday of the month - `ics `__ - `Meeting details/agenda: `__ - * - LLVM Qualification Working Group + * - `LLVM Qualification Working Group `__ - 1st Tuesday/Wednesday of the month - `ics `__ `gcal `__ diff --git a/llvm/docs/ProgrammersManual.rst b/llvm/docs/ProgrammersManual.rst index 602922fcb3b9c..f2b31211cf0dc 100644 --- a/llvm/docs/ProgrammersManual.rst +++ b/llvm/docs/ProgrammersManual.rst @@ -3832,7 +3832,7 @@ Important Subclasses of the ``Instruction`` class * ``BinaryOperator`` - This subclasses represents all two operand instructions whose operands must be + This subclass represents all two operand instructions whose operands must be the same type, except for the comparison instructions. .. _CastInst: diff --git a/llvm/docs/Reference.rst b/llvm/docs/Reference.rst index 7d0fdd78dc96d..5d842d339f8c9 100644 --- a/llvm/docs/Reference.rst +++ b/llvm/docs/Reference.rst @@ -46,6 +46,7 @@ LLVM and API reference documentation. ScudoHardenedAllocator MemoryModelRelaxationAnnotations MemTagSanitizer + QualGroup Security SecurityTransparencyReports SegmentedStacks diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md index 40cddb45df84d..85c16b9c33f10 100644 --- a/llvm/docs/ReleaseNotes.md +++ b/llvm/docs/ReleaseNotes.md @@ -160,6 +160,7 @@ Changes to the LLVM tools * `llvm-readelf` now dumps all hex format values in lower-case mode. * Some code paths for supporting Python 2.7 in `llvm-lit` have been removed. +* Support for `%T` in lit has been removed. Changes to LLDB --------------------------------- diff --git a/llvm/docs/index.rst b/llvm/docs/index.rst index 28ca6bf6316f4..b480729aaa5d9 100644 --- a/llvm/docs/index.rst +++ b/llvm/docs/index.rst @@ -86,7 +86,6 @@ LLVM welcomes contributions of all kinds. To learn more, see the following artic :hidden: GettingInvolved - QualGroup * :doc:`GettingInvolved` * :ref:`development-process` @@ -98,8 +97,6 @@ LLVM welcomes contributions of all kinds. 
To learn more, see the following artic * :ref:`report-security-issue` -* :doc:`QualGroup` - Indices and tables ================== diff --git a/llvm/include/llvm/ADT/CoalescingBitVector.h b/llvm/include/llvm/ADT/CoalescingBitVector.h index 4940bc1c2c18b..b126fc699ad87 100644 --- a/llvm/include/llvm/ADT/CoalescingBitVector.h +++ b/llvm/include/llvm/ADT/CoalescingBitVector.h @@ -194,10 +194,7 @@ template class CoalescingBitVector { // Delete the overlapping intervals. Split up intervals that only partially // intersect an overlap. - for (IntervalT Overlap : Overlaps) { - IndexT OlapStart, OlapStop; - std::tie(OlapStart, OlapStop) = Overlap; - + for (auto [OlapStart, OlapStop] : Overlaps) { auto It = Intervals.find(OlapStart); IndexT CurrStart = It.start(); IndexT CurrStop = It.stop(); @@ -420,10 +417,7 @@ template class CoalescingBitVector { const SmallVectorImpl &Overlaps, SmallVectorImpl &NonOverlappingParts) { IndexT NextUncoveredBit = Start; - for (IntervalT Overlap : Overlaps) { - IndexT OlapStart, OlapStop; - std::tie(OlapStart, OlapStop) = Overlap; - + for (auto [OlapStart, OlapStop] : Overlaps) { // [Start;Stop] and [OlapStart;OlapStop] overlap iff OlapStart <= Stop // and Start <= OlapStop. 
bool DoesOverlap = OlapStart <= Stop && Start <= OlapStop; diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h index e13a2cb09a412..bcf3e9676a7b5 100644 --- a/llvm/include/llvm/ADT/DenseMap.h +++ b/llvm/include/llvm/ADT/DenseMap.h @@ -75,37 +75,39 @@ class DenseMapBase : public DebugEpochBase { using const_iterator = DenseMapIterator; - inline iterator begin() { + [[nodiscard]] inline iterator begin() { return iterator::makeBegin(buckets(), empty(), *this); } - inline iterator end() { return iterator::makeEnd(buckets(), *this); } - inline const_iterator begin() const { + [[nodiscard]] inline iterator end() { + return iterator::makeEnd(buckets(), *this); + } + [[nodiscard]] inline const_iterator begin() const { return const_iterator::makeBegin(buckets(), empty(), *this); } - inline const_iterator end() const { + [[nodiscard]] inline const_iterator end() const { return const_iterator::makeEnd(buckets(), *this); } // Return an iterator to iterate over keys in the map. - inline auto keys() { + [[nodiscard]] inline auto keys() { return map_range(*this, [](const BucketT &P) { return P.getFirst(); }); } // Return an iterator to iterate over values in the map. - inline auto values() { + [[nodiscard]] inline auto values() { return map_range(*this, [](const BucketT &P) { return P.getSecond(); }); } - inline auto keys() const { + [[nodiscard]] inline auto keys() const { return map_range(*this, [](const BucketT &P) { return P.getFirst(); }); } - inline auto values() const { + [[nodiscard]] inline auto values() const { return map_range(*this, [](const BucketT &P) { return P.getSecond(); }); } [[nodiscard]] bool empty() const { return getNumEntries() == 0; } - unsigned size() const { return getNumEntries(); } + [[nodiscard]] unsigned size() const { return getNumEntries(); } /// Grow the densemap so that it can contain at least \p NumEntries items /// before resizing again. 
@@ -153,30 +155,35 @@ class DenseMapBase : public DebugEpochBase { } /// Return true if the specified key is in the map, false otherwise. - bool contains(const_arg_type_t Val) const { + [[nodiscard]] bool contains(const_arg_type_t Val) const { return doFind(Val) != nullptr; } /// Return 1 if the specified key is in the map, 0 otherwise. - size_type count(const_arg_type_t Val) const { + [[nodiscard]] size_type count(const_arg_type_t Val) const { return contains(Val) ? 1 : 0; } - iterator find(const_arg_type_t Val) { return find_as(Val); } - const_iterator find(const_arg_type_t Val) const { return find_as(Val); } + [[nodiscard]] iterator find(const_arg_type_t Val) { + return find_as(Val); + } + [[nodiscard]] const_iterator find(const_arg_type_t Val) const { + return find_as(Val); + } /// Alternate version of find() which allows a different, and possibly /// less expensive, key type. /// The DenseMapInfo is responsible for supplying methods /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key /// type used. - template iterator find_as(const LookupKeyT &Val) { + template + [[nodiscard]] iterator find_as(const LookupKeyT &Val) { if (BucketT *Bucket = doFind(Val)) return makeIterator(Bucket); return end(); } template - const_iterator find_as(const LookupKeyT &Val) const { + [[nodiscard]] const_iterator find_as(const LookupKeyT &Val) const { if (const BucketT *Bucket = doFind(Val)) return makeConstIterator(Bucket); return end(); @@ -184,7 +191,7 @@ class DenseMapBase : public DebugEpochBase { /// lookup - Return the entry for the specified key, or a default /// constructed value if no such entry exists. - ValueT lookup(const_arg_type_t Val) const { + [[nodiscard]] ValueT lookup(const_arg_type_t Val) const { if (const BucketT *Bucket = doFind(Val)) return Bucket->getSecond(); return ValueT(); @@ -194,7 +201,8 @@ class DenseMapBase : public DebugEpochBase { // useful, because `lookup` cannot be used with non-default-constructible // values. 
template > - ValueT lookup_or(const_arg_type_t Val, U &&Default) const { + [[nodiscard]] ValueT lookup_or(const_arg_type_t Val, + U &&Default) const { if (const BucketT *Bucket = doFind(Val)) return Bucket->getSecond(); return Default; @@ -202,7 +210,7 @@ class DenseMapBase : public DebugEpochBase { /// at - Return the entry for the specified key, or abort if no such /// entry exists. - const ValueT &at(const_arg_type_t Val) const { + [[nodiscard]] const ValueT &at(const_arg_type_t Val) const { auto Iter = this->find(std::move(Val)); assert(Iter != this->end() && "DenseMap::at failed due to a missing key"); return Iter->second; @@ -330,14 +338,16 @@ class DenseMapBase : public DebugEpochBase { /// isPointerIntoBucketsArray - Return true if the specified pointer points /// somewhere into the DenseMap's array of buckets (i.e. either to a key or /// value in the DenseMap). - bool isPointerIntoBucketsArray(const void *Ptr) const { + [[nodiscard]] bool isPointerIntoBucketsArray(const void *Ptr) const { return Ptr >= getBuckets() && Ptr < getBucketsEnd(); } /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets /// array. In conjunction with the previous method, this can be used to /// determine whether an insertion caused the DenseMap to reallocate. 
- const void *getPointerIntoBucketsArray() const { return getBuckets(); } + [[nodiscard]] const void *getPointerIntoBucketsArray() const { + return getBuckets(); + } protected: DenseMapBase() = default; @@ -448,6 +458,11 @@ class DenseMapBase : public DebugEpochBase { static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); } private: + DerivedT &derived() { return *static_cast(this); } + const DerivedT &derived() const { + return *static_cast(this); + } + template std::pair lookupOrInsertIntoBucket(KeyArgT &&Key, Ts &&...Args) { @@ -477,39 +492,27 @@ class DenseMapBase : public DebugEpochBase { return const_iterator::makeIterator(TheBucket, buckets(), *this); } - unsigned getNumEntries() const { - return static_cast(this)->getNumEntries(); - } + unsigned getNumEntries() const { return derived().getNumEntries(); } - void setNumEntries(unsigned Num) { - static_cast(this)->setNumEntries(Num); - } + void setNumEntries(unsigned Num) { derived().setNumEntries(Num); } void incrementNumEntries() { setNumEntries(getNumEntries() + 1); } void decrementNumEntries() { setNumEntries(getNumEntries() - 1); } - unsigned getNumTombstones() const { - return static_cast(this)->getNumTombstones(); - } + unsigned getNumTombstones() const { return derived().getNumTombstones(); } - void setNumTombstones(unsigned Num) { - static_cast(this)->setNumTombstones(Num); - } + void setNumTombstones(unsigned Num) { derived().setNumTombstones(Num); } void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); } void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); } - const BucketT *getBuckets() const { - return static_cast(this)->getBuckets(); - } + const BucketT *getBuckets() const { return derived().getBuckets(); } - BucketT *getBuckets() { return static_cast(this)->getBuckets(); } + BucketT *getBuckets() { return derived().getBuckets(); } - unsigned getNumBuckets() const { - return static_cast(this)->getNumBuckets(); - } + unsigned 
getNumBuckets() const { return derived().getNumBuckets(); } BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); } @@ -525,9 +528,9 @@ class DenseMapBase : public DebugEpochBase { return llvm::make_range(getBuckets(), getBucketsEnd()); } - void grow(unsigned AtLeast) { static_cast(this)->grow(AtLeast); } + void grow(unsigned AtLeast) { derived().grow(AtLeast); } - void shrink_and_clear() { static_cast(this)->shrink_and_clear(); } + void shrink_and_clear() { derived().shrink_and_clear(); } template BucketT *findBucketForInsertion(const LookupKeyT &Lookup, @@ -656,7 +659,9 @@ class DenseMapBase : public DebugEpochBase { /// This is just the raw memory used by DenseMap. /// If entries are pointers to objects, the size of the referenced objects /// are not included. - size_t getMemorySize() const { return getNumBuckets() * sizeof(BucketT); } + [[nodiscard]] size_t getMemorySize() const { + return getNumBuckets() * sizeof(BucketT); + } }; /// Equality comparison for DenseMap. @@ -667,9 +672,9 @@ class DenseMapBase : public DebugEpochBase { /// complexity is linear, worst case is O(N^2) (if every hash collides). template -bool operator==( - const DenseMapBase &LHS, - const DenseMapBase &RHS) { +[[nodiscard]] bool +operator==(const DenseMapBase &LHS, + const DenseMapBase &RHS) { if (LHS.size() != RHS.size()) return false; @@ -687,9 +692,9 @@ bool operator==( /// Equivalent to !(LHS == RHS). See operator== for performance notes. 
template -bool operator!=( - const DenseMapBase &LHS, - const DenseMapBase &RHS) { +[[nodiscard]] bool +operator!=(const DenseMapBase &LHS, + const DenseMapBase &RHS) { return !(LHS == RHS); } @@ -1009,21 +1014,13 @@ class SmallDenseMap void copyFrom(const SmallDenseMap &other) { this->destroyAll(); deallocateBuckets(); - Small = true; - if (other.getNumBuckets() > InlineBuckets) { - Small = false; - new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets())); - } + allocateBuckets(other.getNumBuckets()); this->BaseT::copyFrom(other); } void init(unsigned InitNumEntries) { auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries); - Small = true; - if (InitBuckets > InlineBuckets) { - Small = false; - new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets)); - } + allocateBuckets(InitBuckets); this->BaseT::initEmpty(); } @@ -1057,21 +1054,14 @@ class SmallDenseMap // AtLeast == InlineBuckets can happen if there are many tombstones, // and grow() is used to remove them. Usually we always switch to the // large rep here. 
- if (AtLeast > InlineBuckets) { - Small = false; - new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); - } + allocateBuckets(AtLeast); this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd)); return; } LargeRep OldRep = std::move(*getLargeRep()); getLargeRep()->~LargeRep(); - if (AtLeast <= InlineBuckets) { - Small = true; - } else { - new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); - } + allocateBuckets(AtLeast); this->moveFromOldBuckets(OldRep.buckets()); @@ -1166,12 +1156,15 @@ class SmallDenseMap getLargeRep()->~LargeRep(); } - LargeRep allocateBuckets(unsigned Num) { - assert(Num > InlineBuckets && "Must allocate more buckets than are inline"); - LargeRep Rep = {static_cast(allocate_buffer( - sizeof(BucketT) * Num, alignof(BucketT))), - Num}; - return Rep; + void allocateBuckets(unsigned Num) { + if (Num <= InlineBuckets) { + Small = true; + } else { + Small = false; + BucketT *NewBuckets = static_cast( + allocate_buffer(sizeof(BucketT) * Num, alignof(BucketT))); + new (getLargeRep()) LargeRep{NewBuckets, Num}; + } } }; @@ -1239,15 +1232,15 @@ class DenseMapIterator : DebugEpochBase::HandleBase { const DenseMapIterator &I) : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {} - reference operator*() const { + [[nodiscard]] reference operator*() const { assert(isHandleInSync() && "invalid iterator access!"); assert(Ptr != End && "dereferencing end() iterator"); return *Ptr; } - pointer operator->() const { return &operator*(); } + [[nodiscard]] pointer operator->() const { return &operator*(); } - friend bool operator==(const DenseMapIterator &LHS, - const DenseMapIterator &RHS) { + [[nodiscard]] friend bool operator==(const DenseMapIterator &LHS, + const DenseMapIterator &RHS) { assert((!LHS.getEpochAddress() || LHS.isHandleInSync()) && "handle not in sync!"); assert((!RHS.getEpochAddress() || RHS.isHandleInSync()) && @@ -1257,8 +1250,8 @@ class DenseMapIterator : DebugEpochBase::HandleBase { return LHS.Ptr == RHS.Ptr; } - friend bool 
operator!=(const DenseMapIterator &LHS, - const DenseMapIterator &RHS) { + [[nodiscard]] friend bool operator!=(const DenseMapIterator &LHS, + const DenseMapIterator &RHS) { return !(LHS == RHS); } @@ -1296,7 +1289,8 @@ class DenseMapIterator : DebugEpochBase::HandleBase { }; template -inline size_t capacity_in_bytes(const DenseMap &X) { +[[nodiscard]] inline size_t +capacity_in_bytes(const DenseMap &X) { return X.getMemorySize(); } diff --git a/llvm/include/llvm/ADT/DenseSet.h b/llvm/include/llvm/ADT/DenseSet.h index 60ad9b2eb7762..eec800d07b6df 100644 --- a/llvm/include/llvm/ADT/DenseSet.h +++ b/llvm/include/llvm/ADT/DenseSet.h @@ -83,9 +83,9 @@ class DenseSetImpl { DenseSetImpl(llvm::from_range_t, Range &&R) : DenseSetImpl(adl_begin(R), adl_end(R)) {} - bool empty() const { return TheMap.empty(); } - size_type size() const { return TheMap.size(); } - size_t getMemorySize() const { return TheMap.getMemorySize(); } + [[nodiscard]] bool empty() const { return TheMap.empty(); } + [[nodiscard]] size_type size() const { return TheMap.size(); } + [[nodiscard]] size_t getMemorySize() const { return TheMap.getMemorySize(); } /// Grow the DenseSet so that it has at least Size buckets. Will not shrink /// the Size of the set. 
@@ -154,14 +154,20 @@ class DenseSetImpl { using iterator = DenseSetIterator; using const_iterator = DenseSetIterator; - iterator begin() { return iterator(TheMap.begin()); } - iterator end() { return iterator(TheMap.end()); } + [[nodiscard]] iterator begin() { return iterator(TheMap.begin()); } + [[nodiscard]] iterator end() { return iterator(TheMap.end()); } - const_iterator begin() const { return const_iterator(TheMap.begin()); } - const_iterator end() const { return const_iterator(TheMap.end()); } + [[nodiscard]] const_iterator begin() const { + return const_iterator(TheMap.begin()); + } + [[nodiscard]] const_iterator end() const { + return const_iterator(TheMap.end()); + } - iterator find(const_arg_type_t V) { return iterator(TheMap.find(V)); } - const_iterator find(const_arg_type_t V) const { + [[nodiscard]] iterator find(const_arg_type_t V) { + return iterator(TheMap.find(V)); + } + [[nodiscard]] const_iterator find(const_arg_type_t V) const { return const_iterator(TheMap.find(V)); } @@ -180,10 +186,12 @@ class DenseSetImpl { /// The DenseMapInfo is responsible for supplying methods /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type /// used. - template iterator find_as(const LookupKeyT &Val) { + template + [[nodiscard]] iterator find_as(const LookupKeyT &Val) { return iterator(TheMap.find_as(Val)); } template + [[nodiscard]] const_iterator find_as(const LookupKeyT &Val) const { return const_iterator(TheMap.find_as(Val)); } @@ -229,8 +237,9 @@ class DenseSetImpl { /// Equivalent to N calls to RHS.count. Amortized complexity is linear, worst /// case is O(N^2) (if every hash collides). template -bool operator==(const DenseSetImpl &LHS, - const DenseSetImpl &RHS) { +[[nodiscard]] bool +operator==(const DenseSetImpl &LHS, + const DenseSetImpl &RHS) { if (LHS.size() != RHS.size()) return false; @@ -245,8 +254,9 @@ bool operator==(const DenseSetImpl &LHS, /// /// Equivalent to !(LHS == RHS). See operator== for performance notes. 
template -bool operator!=(const DenseSetImpl &LHS, - const DenseSetImpl &RHS) { +[[nodiscard]] bool +operator!=(const DenseSetImpl &LHS, + const DenseSetImpl &RHS) { return !(LHS == RHS); } diff --git a/llvm/include/llvm/ADT/ImmutableMap.h b/llvm/include/llvm/ADT/ImmutableMap.h index 3d19ca41a5be0..32634a96ee9ea 100644 --- a/llvm/include/llvm/ADT/ImmutableMap.h +++ b/llvm/include/llvm/ADT/ImmutableMap.h @@ -111,25 +111,25 @@ class ImmutableMap { } }; - bool contains(key_type_ref K) const { + [[nodiscard]] bool contains(key_type_ref K) const { return Root ? Root->contains(K) : false; } - bool operator==(const ImmutableMap &RHS) const { + [[nodiscard]] bool operator==(const ImmutableMap &RHS) const { return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root; } - bool operator!=(const ImmutableMap &RHS) const { + [[nodiscard]] bool operator!=(const ImmutableMap &RHS) const { return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get()) : Root != RHS.Root; } - TreeTy *getRoot() const { + [[nodiscard]] TreeTy *getRoot() const { if (Root) { Root->retain(); } return Root.get(); } - TreeTy *getRootWithoutRetain() const { return Root.get(); } + [[nodiscard]] TreeTy *getRootWithoutRetain() const { return Root.get(); } void manualRetain() { if (Root) Root->retain(); @@ -139,7 +139,7 @@ class ImmutableMap { if (Root) Root->release(); } - bool isEmpty() const { return !Root; } + [[nodiscard]] bool isEmpty() const { return !Root; } public: //===--------------------------------------------------===// @@ -163,10 +163,10 @@ class ImmutableMap { data_type_ref getData() const { return (*this)->second; } }; - iterator begin() const { return iterator(Root.get()); } - iterator end() const { return iterator(); } + [[nodiscard]] iterator begin() const { return iterator(Root.get()); } + [[nodiscard]] iterator end() const { return iterator(); } - data_type* lookup(key_type_ref K) const { + [[nodiscard]] data_type *lookup(key_type_ref K) const { if (Root) { TreeTy* T = 
Root->find(K); if (T) return &T->getValue().second; @@ -178,7 +178,7 @@ class ImmutableMap { /// getMaxElement - Returns the pair in the ImmutableMap for /// which key is the highest in the ordering of keys in the map. This /// method returns NULL if the map is empty. - value_type* getMaxElement() const { + [[nodiscard]] value_type *getMaxElement() const { return Root ? &(Root->getMaxElement()->getValue()) : nullptr; } @@ -186,7 +186,9 @@ class ImmutableMap { // Utility methods. //===--------------------------------------------------===// - unsigned getHeight() const { return Root ? Root->getHeight() : 0; } + [[nodiscard]] unsigned getHeight() const { + return Root ? Root->getHeight() : 0; + } static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) { ID.AddPointer(M.Root.get()); @@ -250,7 +252,7 @@ class ImmutableMapRef { return ImmutableMapRef(NewT, Factory); } - bool contains(key_type_ref K) const { + [[nodiscard]] bool contains(key_type_ref K) const { return Root ? Root->contains(K) : false; } @@ -258,16 +260,16 @@ class ImmutableMapRef { return ImmutableMap(Factory->getCanonicalTree(Root.get())); } - bool operator==(const ImmutableMapRef &RHS) const { + [[nodiscard]] bool operator==(const ImmutableMapRef &RHS) const { return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root; } - bool operator!=(const ImmutableMapRef &RHS) const { + [[nodiscard]] bool operator!=(const ImmutableMapRef &RHS) const { return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get()) : Root != RHS.Root; } - bool isEmpty() const { return !Root; } + [[nodiscard]] bool isEmpty() const { return !Root; } //===--------------------------------------------------===// // For testing. 
@@ -293,10 +295,10 @@ class ImmutableMapRef { data_type_ref getData() const { return (*this)->second; } }; - iterator begin() const { return iterator(Root.get()); } - iterator end() const { return iterator(); } + [[nodiscard]] iterator begin() const { return iterator(Root.get()); } + [[nodiscard]] iterator end() const { return iterator(); } - data_type *lookup(key_type_ref K) const { + [[nodiscard]] data_type *lookup(key_type_ref K) const { if (Root) { TreeTy* T = Root->find(K); if (T) return &T->getValue().second; @@ -308,7 +310,7 @@ class ImmutableMapRef { /// getMaxElement - Returns the pair in the ImmutableMap for /// which key is the highest in the ordering of keys in the map. This /// method returns NULL if the map is empty. - value_type* getMaxElement() const { + [[nodiscard]] value_type *getMaxElement() const { return Root ? &(Root->getMaxElement()->getValue()) : nullptr; } @@ -316,7 +318,9 @@ class ImmutableMapRef { // Utility methods. //===--------------------------------------------------===// - unsigned getHeight() const { return Root ? Root->getHeight() : 0; } + [[nodiscard]] unsigned getHeight() const { + return Root ? Root->getHeight() : 0; + } static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) { ID.AddPointer(M.Root.get()); diff --git a/llvm/include/llvm/ADT/MapVector.h b/llvm/include/llvm/ADT/MapVector.h index 4a50126ff5aad..82f2c4977e01d 100644 --- a/llvm/include/llvm/ADT/MapVector.h +++ b/llvm/include/llvm/ADT/MapVector.h @@ -45,15 +45,15 @@ class MapVector { using const_reverse_iterator = typename VectorType::const_reverse_iterator; /// Clear the MapVector and return the underlying vector. - VectorType takeVector() { + [[nodiscard]] VectorType takeVector() { Map.clear(); return std::move(Vector); } /// Returns an array reference of the underlying vector. 
- ArrayRef getArrayRef() const { return Vector; } + [[nodiscard]] ArrayRef getArrayRef() const { return Vector; } - size_type size() const { return Vector.size(); } + [[nodiscard]] size_type size() const { return Vector.size(); } /// Grow the MapVector so that it can contain at least \p NumEntries items /// before resizing again. @@ -62,24 +62,28 @@ class MapVector { Vector.reserve(NumEntries); } - iterator begin() { return Vector.begin(); } - const_iterator begin() const { return Vector.begin(); } - iterator end() { return Vector.end(); } - const_iterator end() const { return Vector.end(); } + [[nodiscard]] iterator begin() { return Vector.begin(); } + [[nodiscard]] const_iterator begin() const { return Vector.begin(); } + [[nodiscard]] iterator end() { return Vector.end(); } + [[nodiscard]] const_iterator end() const { return Vector.end(); } - reverse_iterator rbegin() { return Vector.rbegin(); } - const_reverse_iterator rbegin() const { return Vector.rbegin(); } - reverse_iterator rend() { return Vector.rend(); } - const_reverse_iterator rend() const { return Vector.rend(); } - - bool empty() const { - return Vector.empty(); + [[nodiscard]] reverse_iterator rbegin() { return Vector.rbegin(); } + [[nodiscard]] const_reverse_iterator rbegin() const { + return Vector.rbegin(); } + [[nodiscard]] reverse_iterator rend() { return Vector.rend(); } + [[nodiscard]] const_reverse_iterator rend() const { return Vector.rend(); } + + [[nodiscard]] bool empty() const { return Vector.empty(); } - std::pair &front() { return Vector.front(); } - const std::pair &front() const { return Vector.front(); } - std::pair &back() { return Vector.back(); } - const std::pair &back() const { return Vector.back(); } + [[nodiscard]] std::pair &front() { return Vector.front(); } + [[nodiscard]] const std::pair &front() const { + return Vector.front(); + } + [[nodiscard]] std::pair &back() { return Vector.back(); } + [[nodiscard]] const std::pair &back() const { + return Vector.back(); + } 
void clear() { Map.clear(); @@ -96,7 +100,7 @@ class MapVector { } // Returns a copy of the value. Only allowed if ValueT is copyable. - ValueT lookup(const KeyT &Key) const { + [[nodiscard]] ValueT lookup(const KeyT &Key) const { static_assert(std::is_copy_constructible_v, "Cannot call lookup() if ValueT is not copyable."); typename MapType::const_iterator Pos = Map.find(Key); @@ -134,17 +138,21 @@ class MapVector { return Ret; } - bool contains(const KeyT &Key) const { return Map.find(Key) != Map.end(); } + [[nodiscard]] bool contains(const KeyT &Key) const { + return Map.find(Key) != Map.end(); + } - size_type count(const KeyT &Key) const { return contains(Key) ? 1 : 0; } + [[nodiscard]] size_type count(const KeyT &Key) const { + return contains(Key) ? 1 : 0; + } - iterator find(const KeyT &Key) { + [[nodiscard]] iterator find(const KeyT &Key) { typename MapType::const_iterator Pos = Map.find(Key); return Pos == Map.end()? Vector.end() : (Vector.begin() + Pos->second); } - const_iterator find(const KeyT &Key) const { + [[nodiscard]] const_iterator find(const KeyT &Key) const { typename MapType::const_iterator Pos = Map.find(Key); return Pos == Map.end()? Vector.end() : (Vector.begin() + Pos->second); diff --git a/llvm/include/llvm/ADT/PackedVector.h b/llvm/include/llvm/ADT/PackedVector.h index 1146cc4bd6d23..77fcbf24b2861 100644 --- a/llvm/include/llvm/ADT/PackedVector.h +++ b/llvm/include/llvm/ADT/PackedVector.h @@ -47,7 +47,7 @@ class PackedVectorBase { protected: static T getValue(const BitVectorTy &Bits, unsigned Idx) { T val = T(); - for (unsigned i = 0; i != BitNum-1; ++i) + for (unsigned i = 0; i != BitNum - 1; ++i) val = T(val | ((Bits[(Idx * BitNum) + i] ? 
1UL : 0UL) << i)); if (Bits[(Idx * BitNum) + BitNum - 1]) val = ~val; @@ -58,9 +58,11 @@ class PackedVectorBase { if (val < 0) { val = ~val; Bits.set((Idx * BitNum) + BitNum - 1); + } else { + Bits.reset((Idx * BitNum) + BitNum - 1); } - assert((val >> (BitNum-1)) == 0 && "value is too big"); - for (unsigned i = 0; i != BitNum-1; ++i) + assert((val >> (BitNum - 1)) == 0 && "value is too big"); + for (unsigned i = 0; i != BitNum - 1; ++i) Bits[(Idx * BitNum) + i] = val & (T(1) << i); } }; @@ -73,8 +75,9 @@ class PackedVectorBase { /// will create a vector accepting values -2, -1, 0, 1. Any other value will hit /// an assertion. template -class PackedVector : public PackedVectorBase::is_signed> { +class PackedVector + : public PackedVectorBase::is_signed> { BitVectorTy Bits; // Keep track of the number of elements on our own. // We always maintain Bits.size() == NumElements * BitNum. @@ -97,9 +100,7 @@ class PackedVector : public PackedVectorBase getArrayRef() const { return vector_; } + [[nodiscard]] ArrayRef getArrayRef() const { return vector_; } /// Clear the SetVector and return the underlying vector. - Vector takeVector() { + [[nodiscard]] Vector takeVector() { set_.clear(); return std::move(vector_); } /// Determine if the SetVector is empty or not. - bool empty() const { - return vector_.empty(); - } + [[nodiscard]] bool empty() const { return vector_.empty(); } /// Determine the number of elements in the SetVector. - size_type size() const { - return vector_.size(); - } + [[nodiscard]] size_type size() const { return vector_.size(); } /// Get an iterator to the beginning of the SetVector. - iterator begin() { - return vector_.begin(); - } + [[nodiscard]] iterator begin() { return vector_.begin(); } /// Get a const_iterator to the beginning of the SetVector. - const_iterator begin() const { - return vector_.begin(); - } + [[nodiscard]] const_iterator begin() const { return vector_.begin(); } /// Get an iterator to the end of the SetVector. 
- iterator end() { - return vector_.end(); - } + [[nodiscard]] iterator end() { return vector_.end(); } /// Get a const_iterator to the end of the SetVector. - const_iterator end() const { - return vector_.end(); - } + [[nodiscard]] const_iterator end() const { return vector_.end(); } /// Get an reverse_iterator to the end of the SetVector. - reverse_iterator rbegin() { - return vector_.rbegin(); - } + [[nodiscard]] reverse_iterator rbegin() { return vector_.rbegin(); } /// Get a const_reverse_iterator to the end of the SetVector. - const_reverse_iterator rbegin() const { + [[nodiscard]] const_reverse_iterator rbegin() const { return vector_.rbegin(); } /// Get a reverse_iterator to the beginning of the SetVector. - reverse_iterator rend() { - return vector_.rend(); - } + [[nodiscard]] reverse_iterator rend() { return vector_.rend(); } /// Get a const_reverse_iterator to the beginning of the SetVector. - const_reverse_iterator rend() const { - return vector_.rend(); - } + [[nodiscard]] const_reverse_iterator rend() const { return vector_.rend(); } /// Return the first element of the SetVector. - const value_type &front() const { + [[nodiscard]] const value_type &front() const { assert(!empty() && "Cannot call front() on empty SetVector!"); return vector_.front(); } /// Return the last element of the SetVector. 
- const value_type &back() const { + [[nodiscard]] const value_type &back() const { assert(!empty() && "Cannot call back() on empty SetVector!"); return vector_.back(); } @@ -299,11 +281,11 @@ class SetVector { return Ret; } - bool operator==(const SetVector &that) const { + [[nodiscard]] bool operator==(const SetVector &that) const { return vector_ == that.vector_; } - bool operator!=(const SetVector &that) const { + [[nodiscard]] bool operator!=(const SetVector &that) const { return vector_ != that.vector_; } diff --git a/llvm/include/llvm/ADT/SmallPtrSet.h b/llvm/include/llvm/ADT/SmallPtrSet.h index d5332379fc542..e24cd6415b687 100644 --- a/llvm/include/llvm/ADT/SmallPtrSet.h +++ b/llvm/include/llvm/ADT/SmallPtrSet.h @@ -96,8 +96,8 @@ class SmallPtrSetImplBase : public DebugEpochBase { SmallPtrSetImplBase &operator=(const SmallPtrSetImplBase &) = delete; [[nodiscard]] bool empty() const { return size() == 0; } - size_type size() const { return NumEntries; } - size_type capacity() const { return CurArraySize; } + [[nodiscard]] size_type size() const { return NumEntries; } + [[nodiscard]] size_type capacity() const { return CurArraySize; } void clear() { incrementEpoch(); @@ -279,19 +279,12 @@ class SmallPtrSetImplBase : public DebugEpochBase { /// SmallPtrSetIteratorImpl - This is the common base class shared between all /// instances of SmallPtrSetIterator. 
-class SmallPtrSetIteratorImpl { -protected: - using BucketItTy = - std::conditional_t, - const void *const *>; - - BucketItTy Bucket; - BucketItTy End; - +class LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE SmallPtrSetIteratorImpl + : public DebugEpochBase::HandleBase { public: - explicit SmallPtrSetIteratorImpl(const void *const *BP, const void *const *E) - : Bucket(BP), End(E) { + explicit SmallPtrSetIteratorImpl(const void *const *BP, const void *const *E, + const DebugEpochBase &Epoch) + : DebugEpochBase::HandleBase(&Epoch), Bucket(BP), End(E) { AdvanceIfNotValid(); } @@ -303,6 +296,18 @@ class SmallPtrSetIteratorImpl { } protected: + void *dereference() const { + assert(isHandleInSync() && "invalid iterator access!"); + assert(Bucket < End); + return const_cast(*Bucket); + } + void increment() { + assert(isHandleInSync() && "invalid iterator access!"); + ++Bucket; + AdvanceIfNotValid(); + } + +private: /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket /// that is. This is guaranteed to stop because the end() bucket is marked /// valid. @@ -313,13 +318,19 @@ class SmallPtrSetIteratorImpl { *Bucket == SmallPtrSetImplBase::getTombstoneMarker())) ++Bucket; } + + using BucketItTy = + std::conditional_t, + const void *const *>; + + BucketItTy Bucket; + BucketItTy End; }; /// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet. 
template -class LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE SmallPtrSetIterator - : public SmallPtrSetIteratorImpl, - DebugEpochBase::HandleBase { +class SmallPtrSetIterator : public SmallPtrSetIteratorImpl { using PtrTraits = PointerLikeTypeTraits; public: @@ -329,28 +340,22 @@ class LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE SmallPtrSetIterator using difference_type = std::ptrdiff_t; using iterator_category = std::forward_iterator_tag; - explicit SmallPtrSetIterator(const void *const *BP, const void *const *E, - const DebugEpochBase &Epoch) - : SmallPtrSetIteratorImpl(BP, E), DebugEpochBase::HandleBase(&Epoch) {} + using SmallPtrSetIteratorImpl::SmallPtrSetIteratorImpl; // Most methods are provided by the base class. - const PtrTy operator*() const { - assert(isHandleInSync() && "invalid iterator access!"); - assert(Bucket < End); - return PtrTraits::getFromVoidPointer(const_cast(*Bucket)); + [[nodiscard]] const PtrTy operator*() const { + return PtrTraits::getFromVoidPointer(dereference()); } inline SmallPtrSetIterator &operator++() { // Preincrement - assert(isHandleInSync() && "invalid iterator access!"); - ++Bucket; - AdvanceIfNotValid(); + increment(); return *this; } SmallPtrSetIterator operator++(int) { // Postincrement SmallPtrSetIterator tmp = *this; - ++*this; + increment(); return tmp; } }; @@ -447,13 +452,13 @@ template class SmallPtrSetImpl : public SmallPtrSetImplBase { } /// count - Return 1 if the specified pointer is in the set, 0 otherwise. 
- size_type count(ConstPtrType Ptr) const { + [[nodiscard]] size_type count(ConstPtrType Ptr) const { return contains_imp(ConstPtrTraits::getAsVoidPointer(Ptr)); } - iterator find(ConstPtrType Ptr) const { + [[nodiscard]] iterator find(ConstPtrType Ptr) const { return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr))); } - bool contains(ConstPtrType Ptr) const { + [[nodiscard]] bool contains(ConstPtrType Ptr) const { return contains_imp(ConstPtrTraits::getAsVoidPointer(Ptr)); } @@ -470,12 +475,12 @@ template class SmallPtrSetImpl : public SmallPtrSetImplBase { insert(adl_begin(R), adl_end(R)); } - iterator begin() const { + [[nodiscard]] iterator begin() const { if (shouldReverseIterate()) return makeIterator(EndPointer() - 1); return makeIterator(CurArray); } - iterator end() const { return makeIterator(EndPointer()); } + [[nodiscard]] iterator end() const { return makeIterator(EndPointer()); } private: /// Create an iterator that dereferences to same place as the given pointer. @@ -491,8 +496,8 @@ template class SmallPtrSetImpl : public SmallPtrSetImplBase { /// Iterates over elements of LHS confirming that each value from LHS is also in /// RHS, and that no additional values are in RHS. template -bool operator==(const SmallPtrSetImpl &LHS, - const SmallPtrSetImpl &RHS) { +[[nodiscard]] bool operator==(const SmallPtrSetImpl &LHS, + const SmallPtrSetImpl &RHS) { if (LHS.size() != RHS.size()) return false; @@ -507,8 +512,8 @@ bool operator==(const SmallPtrSetImpl &LHS, /// /// Equivalent to !(LHS == RHS). 
template -bool operator!=(const SmallPtrSetImpl &LHS, - const SmallPtrSetImpl &RHS) { +[[nodiscard]] bool operator!=(const SmallPtrSetImpl &LHS, + const SmallPtrSetImpl &RHS) { return !(LHS == RHS); } diff --git a/llvm/include/llvm/ADT/SmallSet.h b/llvm/include/llvm/ADT/SmallSet.h index 0e90293352630..3ca833f15eed3 100644 --- a/llvm/include/llvm/ADT/SmallSet.h +++ b/llvm/include/llvm/ADT/SmallSet.h @@ -167,12 +167,14 @@ class SmallSet { [[nodiscard]] bool empty() const { return Vector.empty() && Set.empty(); } - size_type size() const { + [[nodiscard]] size_type size() const { return isSmall() ? Vector.size() : Set.size(); } /// count - Return 1 if the element is in the set, 0 otherwise. - size_type count(const T &V) const { return contains(V) ? 1 : 0; } + [[nodiscard]] size_type count(const T &V) const { + return contains(V) ? 1 : 0; + } /// insert - Insert an element into the set if it isn't already there. /// Returns a pair. The first value of it is an iterator to the inserted @@ -210,20 +212,20 @@ class SmallSet { Set.clear(); } - const_iterator begin() const { + [[nodiscard]] const_iterator begin() const { if (isSmall()) return {Vector.begin()}; return {Set.begin()}; } - const_iterator end() const { + [[nodiscard]] const_iterator end() const { if (isSmall()) return {Vector.end()}; return {Set.end()}; } /// Check if the SmallSet contains the given element. - bool contains(const T &V) const { + [[nodiscard]] bool contains(const T &V) const { if (isSmall()) return vfind(V) != Vector.end(); return Set.find(V) != Set.end(); @@ -279,7 +281,8 @@ class SmallSet : public SmallPtrSet {}; /// For large-set mode amortized complexity is linear, worst case is O(N^2) (if /// every hash collides). 
template -bool operator==(const SmallSet &LHS, const SmallSet &RHS) { +[[nodiscard]] bool operator==(const SmallSet &LHS, + const SmallSet &RHS) { if (LHS.size() != RHS.size()) return false; @@ -291,7 +294,8 @@ bool operator==(const SmallSet &LHS, const SmallSet &RHS) { /// /// Equivalent to !(LHS == RHS). See operator== for performance notes. template -bool operator!=(const SmallSet &LHS, const SmallSet &RHS) { +[[nodiscard]] bool operator!=(const SmallSet &LHS, + const SmallSet &RHS) { return !(LHS == RHS); } diff --git a/llvm/include/llvm/ADT/SmallVector.h b/llvm/include/llvm/ADT/SmallVector.h index 36b324355ee10..77805f5c03c14 100644 --- a/llvm/include/llvm/ADT/SmallVector.h +++ b/llvm/include/llvm/ADT/SmallVector.h @@ -199,17 +199,18 @@ class SmallVectorTemplateCommon } /// Check whether any part of the range will be invalidated by clearing. - void assertSafeToReferenceAfterClear(const T *From, const T *To) { - if (From == To) - return; - this->assertSafeToReferenceAfterResize(From, 0); - this->assertSafeToReferenceAfterResize(To - 1, 0); - } - template < - class ItTy, - std::enable_if_t, T *>::value, - bool> = false> - void assertSafeToReferenceAfterClear(ItTy, ItTy) {} + template + void assertSafeToReferenceAfterClear(ItTy From, ItTy To) { + if constexpr (std::is_pointer_v && + std::is_same_v< + std::remove_const_t>, + std::remove_const_t>) { + if (From == To) + return; + this->assertSafeToReferenceAfterResize(From, 0); + this->assertSafeToReferenceAfterResize(To - 1, 0); + } + } /// Check whether any part of the range will be invalidated by growing. 
template void assertSafeToAddRange(ItTy From, ItTy To) { diff --git a/llvm/include/llvm/ADT/SparseSet.h b/llvm/include/llvm/ADT/SparseSet.h index 395cfc3ebfd43..9783301be4b64 100644 --- a/llvm/include/llvm/ADT/SparseSet.h +++ b/llvm/include/llvm/ADT/SparseSet.h @@ -171,23 +171,23 @@ class SparseSet { using iterator = typename DenseT::iterator; using const_iterator = typename DenseT::const_iterator; - const_iterator begin() const { return Dense.begin(); } - const_iterator end() const { return Dense.end(); } - iterator begin() { return Dense.begin(); } - iterator end() { return Dense.end(); } + [[nodiscard]] const_iterator begin() const { return Dense.begin(); } + [[nodiscard]] const_iterator end() const { return Dense.end(); } + [[nodiscard]] iterator begin() { return Dense.begin(); } + [[nodiscard]] iterator end() { return Dense.end(); } /// empty - Returns true if the set is empty. /// /// This is not the same as BitVector::empty(). /// - bool empty() const { return Dense.empty(); } + [[nodiscard]] bool empty() const { return Dense.empty(); } /// size - Returns the number of elements in the set. /// /// This is not the same as BitVector::size() which returns the size of the /// universe. /// - size_type size() const { return Dense.size(); } + [[nodiscard]] size_type size() const { return Dense.size(); } /// clear - Clears the set. This is a very fast constant time operation. /// @@ -222,21 +222,27 @@ class SparseSet { /// @param Key A valid key to find. /// @returns An iterator to the element identified by key, or end(). /// - iterator find(const KeyT &Key) { return findIndex(KeyIndexOf(Key)); } + [[nodiscard]] iterator find(const KeyT &Key) { + return findIndex(KeyIndexOf(Key)); + } - const_iterator find(const KeyT &Key) const { + [[nodiscard]] const_iterator find(const KeyT &Key) const { return const_cast(this)->findIndex(KeyIndexOf(Key)); } /// Check if the set contains the given \c Key. /// /// @param Key A valid key to find. 
- bool contains(const KeyT &Key) const { return find(Key) != end(); } + [[nodiscard]] bool contains(const KeyT &Key) const { + return find(Key) != end(); + } /// count - Returns 1 if this set contains an element identified by Key, /// 0 otherwise. /// - size_type count(const KeyT &Key) const { return contains(Key) ? 1 : 0; } + [[nodiscard]] size_type count(const KeyT &Key) const { + return contains(Key) ? 1 : 0; + } /// insert - Attempts to insert a new element. /// diff --git a/llvm/include/llvm/ADT/StringMap.h b/llvm/include/llvm/ADT/StringMap.h index 2c146fbf08df1..01cbf2d3fff71 100644 --- a/llvm/include/llvm/ADT/StringMap.h +++ b/llvm/include/llvm/ADT/StringMap.h @@ -102,18 +102,18 @@ class StringMapImpl { return reinterpret_cast(TombstoneIntVal); } - unsigned getNumBuckets() const { return NumBuckets; } - unsigned getNumItems() const { return NumItems; } + [[nodiscard]] unsigned getNumBuckets() const { return NumBuckets; } + [[nodiscard]] unsigned getNumItems() const { return NumItems; } - bool empty() const { return NumItems == 0; } - unsigned size() const { return NumItems; } + [[nodiscard]] bool empty() const { return NumItems == 0; } + [[nodiscard]] unsigned size() const { return NumItems; } /// Returns the hash value that will be used for the given string. /// This allows precomputing the value and passing it explicitly /// to some of the functions. /// The implementation of this function is not guaranteed to be stable /// and may change. 
- LLVM_ABI static uint32_t hash(StringRef Key); + [[nodiscard]] LLVM_ABI static uint32_t hash(StringRef Key); void swap(StringMapImpl &Other) { std::swap(TheTable, Other.TheTable); @@ -220,30 +220,35 @@ class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap using const_iterator = StringMapIterBase; using iterator = StringMapIterBase; - iterator begin() { return iterator(TheTable, NumBuckets != 0); } - iterator end() { return iterator(TheTable + NumBuckets); } - const_iterator begin() const { + [[nodiscard]] iterator begin() { return iterator(TheTable, NumBuckets != 0); } + [[nodiscard]] iterator end() { return iterator(TheTable + NumBuckets); } + [[nodiscard]] const_iterator begin() const { return const_iterator(TheTable, NumBuckets != 0); } - const_iterator end() const { return const_iterator(TheTable + NumBuckets); } + [[nodiscard]] const_iterator end() const { + return const_iterator(TheTable + NumBuckets); + } - iterator_range> keys() const { + [[nodiscard]] iterator_range> keys() const { return make_range(StringMapKeyIterator(begin()), StringMapKeyIterator(end())); } - iterator find(StringRef Key) { return find(Key, hash(Key)); } + [[nodiscard]] iterator find(StringRef Key) { return find(Key, hash(Key)); } - iterator find(StringRef Key, uint32_t FullHashValue) { + [[nodiscard]] iterator find(StringRef Key, uint32_t FullHashValue) { int Bucket = FindKey(Key, FullHashValue); if (Bucket == -1) return end(); return iterator(TheTable + Bucket); } - const_iterator find(StringRef Key) const { return find(Key, hash(Key)); } + [[nodiscard]] const_iterator find(StringRef Key) const { + return find(Key, hash(Key)); + } - const_iterator find(StringRef Key, uint32_t FullHashValue) const { + [[nodiscard]] const_iterator find(StringRef Key, + uint32_t FullHashValue) const { int Bucket = FindKey(Key, FullHashValue); if (Bucket == -1) return end(); @@ -252,7 +257,7 @@ class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap /// lookup - Return the entry for the specified key, or a default /// 
constructed value if no such entry exists. - ValueTy lookup(StringRef Key) const { + [[nodiscard]] ValueTy lookup(StringRef Key) const { const_iterator Iter = find(Key); if (Iter != end()) return Iter->second; @@ -261,7 +266,7 @@ class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap /// at - Return the entry for the specified key, or abort if no such /// entry exists. - const ValueTy &at(StringRef Val) const { + [[nodiscard]] const ValueTy &at(StringRef Val) const { auto Iter = this->find(Val); assert(Iter != this->end() && "StringMap::at failed due to a missing key"); return Iter->second; @@ -272,18 +277,22 @@ class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap ValueTy &operator[](StringRef Key) { return try_emplace(Key).first->second; } /// contains - Return true if the element is in the map, false otherwise. - bool contains(StringRef Key) const { return find(Key) != end(); } + [[nodiscard]] bool contains(StringRef Key) const { + return find(Key) != end(); + } /// count - Return 1 if the element is in the map, 0 otherwise. - size_type count(StringRef Key) const { return contains(Key) ? 1 : 0; } + [[nodiscard]] size_type count(StringRef Key) const { + return contains(Key) ? 1 : 0; + } template - size_type count(const StringMapEntry &MapEntry) const { + [[nodiscard]] size_type count(const StringMapEntry &MapEntry) const { return count(MapEntry.getKey()); } /// equal - check whether both of the containers are equal. - bool operator==(const StringMap &RHS) const { + [[nodiscard]] bool operator==(const StringMap &RHS) const { if (size() != RHS.size()) return false; @@ -302,7 +311,9 @@ class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap return true; } - bool operator!=(const StringMap &RHS) const { return !(*this == RHS); } + [[nodiscard]] bool operator!=(const StringMap &RHS) const { + return !(*this == RHS); + } /// insert - Insert the specified key/value pair into the map. 
If the key /// already exists in the map, return false and ignore the request, otherwise @@ -447,8 +458,12 @@ template class StringMapIterBase { AdvancePastEmptyBuckets(); } - reference operator*() const { return *static_cast(*Ptr); } - pointer operator->() const { return static_cast(*Ptr); } + [[nodiscard]] reference operator*() const { + return *static_cast(*Ptr); + } + [[nodiscard]] pointer operator->() const { + return static_cast(*Ptr); + } StringMapIterBase &operator++() { // Preincrement ++Ptr; diff --git a/llvm/include/llvm/ADT/StringSet.h b/llvm/include/llvm/ADT/StringSet.h index b4853423a1ef3..c8be3f2a503e4 100644 --- a/llvm/include/llvm/ADT/StringSet.h +++ b/llvm/include/llvm/ADT/StringSet.h @@ -57,7 +57,9 @@ class StringSet : public StringMap { } /// Check if the set contains the given \c key. - bool contains(StringRef key) const { return Base::contains(key); } + [[nodiscard]] bool contains(StringRef key) const { + return Base::contains(key); + } }; } // end namespace llvm diff --git a/llvm/include/llvm/ADT/StringTable.h b/llvm/include/llvm/ADT/StringTable.h index 575b3c929e40c..9422a6da1ce8e 100644 --- a/llvm/include/llvm/ADT/StringTable.h +++ b/llvm/include/llvm/ADT/StringTable.h @@ -118,12 +118,8 @@ class StringTable { constexpr Iterator(const Iterator &RHS) = default; constexpr Iterator(Iterator &&RHS) = default; - Iterator &operator=(const Iterator &RHS) { - Table = RHS.Table; - O = RHS.O; - S = RHS.S; - return *this; - } + constexpr Iterator &operator=(const Iterator &RHS) = default; + constexpr Iterator &operator=(Iterator &&RHS) = default; bool operator==(const Iterator &RHS) const { assert(Table == RHS.Table && "Compared iterators for unrelated tables!"); diff --git a/llvm/include/llvm/Analysis/InterestingMemoryOperand.h b/llvm/include/llvm/Analysis/InterestingMemoryOperand.h index abcf9a841d5fc..e8124f72a1a81 100644 --- a/llvm/include/llvm/Analysis/InterestingMemoryOperand.h +++ b/llvm/include/llvm/Analysis/InterestingMemoryOperand.h @@ -32,14 
+32,19 @@ class InterestingMemoryOperand { Value *MaybeEVL; // The Stride Value, if we're looking at a strided load/store. Value *MaybeStride; + // The Offset Value, if we're looking at an indexed load/store. The + // offset actually means byte-offset instead of array index. + Value *MaybeByteOffset; InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite, class Type *OpType, MaybeAlign Alignment, Value *MaybeMask = nullptr, Value *MaybeEVL = nullptr, - Value *MaybeStride = nullptr) + Value *MaybeStride = nullptr, + Value *MaybeByteOffset = nullptr) : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment), - MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) { + MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride), + MaybeByteOffset(MaybeByteOffset) { const DataLayout &DL = I->getDataLayout(); TypeStoreSize = DL.getTypeStoreSizeInBits(OpType); PtrUse = &I->getOperandUse(OperandNo); diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h index 49a795b5fd6a7..52ab38583d5de 100644 --- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h +++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h @@ -413,30 +413,29 @@ class MemoryDepChecker { uint64_t MaxStride; std::optional CommonStride; - /// TypeByteSize is a pair of alloc sizes of the source and sink. - std::pair TypeByteSize; - - // HasSameSize is a boolean indicating whether the store sizes of the source - // and sink are equal. - // TODO: Remove this. - bool HasSameSize; + /// TypeByteSize is either the common store size of both accesses, or 0 when + /// store sizes mismatch. 
+ uint64_t TypeByteSize; bool AIsWrite; bool BIsWrite; DepDistanceStrideAndSizeInfo(const SCEV *Dist, uint64_t MaxStride, std::optional CommonStride, - std::pair TypeByteSize, - bool HasSameSize, bool AIsWrite, bool BIsWrite) + uint64_t TypeByteSize, bool AIsWrite, + bool BIsWrite) : Dist(Dist), MaxStride(MaxStride), CommonStride(CommonStride), - TypeByteSize(TypeByteSize), HasSameSize(HasSameSize), - AIsWrite(AIsWrite), BIsWrite(BIsWrite) {} + TypeByteSize(TypeByteSize), AIsWrite(AIsWrite), BIsWrite(BIsWrite) {} }; /// Get the dependence distance, strides, type size and whether it is a write - /// for the dependence between A and B. Returns either a DepType, the - /// dependence result, if it could already be determined, or a - /// DepDistanceStrideAndSizeInfo struct. + /// for the dependence between A and B. Returns a DepType, if we can prove + /// there's no dependence or the analysis fails. Outlined to lambda to limit + /// the scope of various temporary variables, like A/BPtr, StrideA/BPtr and + /// others. Returns either the dependence result, if it could already be + /// determined, or a DepDistanceStrideAndSizeInfo struct, noting that + /// TypeByteSize could be 0 when store sizes mismatch, and this should be + /// checked in the caller. 
std::variant getDependenceDistanceStrideAndSize(const MemAccessInfo &A, Instruction *AInst, const MemAccessInfo &B, diff --git a/llvm/include/llvm/BinaryFormat/DXContainer.h b/llvm/include/llvm/BinaryFormat/DXContainer.h index c04380667a640..08a7ddb6929f5 100644 --- a/llvm/include/llvm/BinaryFormat/DXContainer.h +++ b/llvm/include/llvm/BinaryFormat/DXContainer.h @@ -185,6 +185,15 @@ enum class DescriptorRangeFlags : uint32_t { LLVM_ABI ArrayRef> getDescriptorRangeFlags(); +#define STATIC_SAMPLER_FLAG(Num, Enum, Flag) Enum = Num, +enum class StaticSamplerFlags : uint32_t { +#include "DXContainerConstants.def" + + LLVM_MARK_AS_BITMASK_ENUM(NonNormalizedCoordinates) +}; + +LLVM_ABI ArrayRef> getStaticSamplerFlags(); + #define ROOT_PARAMETER(Val, Enum) Enum = Val, enum class RootParameterType : uint32_t { #include "DXContainerConstants.def" @@ -813,6 +822,22 @@ struct DescriptorRange { } }; } // namespace v2 + +namespace v3 { +struct StaticSampler : public v1::StaticSampler { + uint32_t Flags; + + StaticSampler() = default; + explicit StaticSampler(v1::StaticSampler &Base) + : v1::StaticSampler(Base), Flags(0U) {} + + void swapBytes() { + v1::StaticSampler::swapBytes(); + sys::swapByteOrder(Flags); + } +}; + +} // namespace v3 } // namespace RTS0 // D3D_ROOT_SIGNATURE_VERSION diff --git a/llvm/include/llvm/BinaryFormat/DXContainerConstants.def b/llvm/include/llvm/BinaryFormat/DXContainerConstants.def index 889653611d79a..f576d958037cd 100644 --- a/llvm/include/llvm/BinaryFormat/DXContainerConstants.def +++ b/llvm/include/llvm/BinaryFormat/DXContainerConstants.def @@ -104,6 +104,16 @@ DESCRIPTOR_RANGE_FLAG(0x10000, DescriptorsStaticKeepingBufferBoundsChecks, DESCR #undef DESCRIPTOR_RANGE_FLAG #endif // DESCRIPTOR_RANGE_FLAG +// STATIC_SAMPLER_FLAG(flag value, name, flag). 
+#ifdef STATIC_SAMPLER_FLAG + +STATIC_SAMPLER_FLAG(0x0, None, SAMPLER_FLAG_NONE) +STATIC_SAMPLER_FLAG(0x1, UintBorderColor, SAMPLER_FLAG_UINT_BORDER_COLOR) +STATIC_SAMPLER_FLAG(0x2, NonNormalizedCoordinates, SAMPLER_FLAG_NON_NORMALIZED_COORDINATES) + +#undef STATIC_SAMPLER_FLAG +#endif // STATIC_SAMPLER_FLAG + #ifdef ROOT_PARAMETER ROOT_PARAMETER(0, DescriptorTable) diff --git a/llvm/include/llvm/Bitstream/BitstreamWriter.h b/llvm/include/llvm/Bitstream/BitstreamWriter.h index 5f53681320ce4..a2938642f824a 100644 --- a/llvm/include/llvm/Bitstream/BitstreamWriter.h +++ b/llvm/include/llvm/Bitstream/BitstreamWriter.h @@ -87,7 +87,7 @@ class BitstreamWriter { void WriteWord(unsigned Value) { Value = - support::endian::byte_swap(Value); + support::endian::byte_swap(Value, llvm::endianness::little); Buffer.append(reinterpret_cast(&Value), reinterpret_cast(&Value + 1)); } diff --git a/llvm/include/llvm/CodeGen/LiveInterval.h b/llvm/include/llvm/CodeGen/LiveInterval.h index e1c5717f5face..f18c177b1c35b 100644 --- a/llvm/include/llvm/CodeGen/LiveInterval.h +++ b/llvm/include/llvm/CodeGen/LiveInterval.h @@ -83,8 +83,16 @@ namespace llvm { /// Mark this value as unused. void markUnused() { def = SlotIndex(); } + + LLVM_ABI void print(raw_ostream &OS) const; + LLVM_ABI void dump() const; }; + inline raw_ostream &operator<<(raw_ostream &OS, const VNInfo &VNI) { + VNI.print(OS); + return OS; + } + /// Result of a LiveRange query. This class hides the implementation details /// of live ranges, and it should be used as the primary interface for /// examining live ranges around instructions. diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h index 6d026796e93b7..71739278cf513 100644 --- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h +++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h @@ -1040,7 +1040,9 @@ class MachineBasicBlock /// Succ, can be split. 
If this returns true a subsequent call to /// SplitCriticalEdge is guaranteed to return a valid basic block if /// no changes occurred in the meantime. - LLVM_ABI bool canSplitCriticalEdge(const MachineBasicBlock *Succ) const; + LLVM_ABI bool + canSplitCriticalEdge(const MachineBasicBlock *Succ, + const MachineLoopInfo *MLI = nullptr) const; void pop_front() { Insts.pop_front(); } void pop_back() { Insts.pop_back(); } diff --git a/llvm/include/llvm/CodeGen/RDFGraph.h b/llvm/include/llvm/CodeGen/RDFGraph.h index 8a93afbcb5491..6bb6033a8a2f2 100644 --- a/llvm/include/llvm/CodeGen/RDFGraph.h +++ b/llvm/include/llvm/CodeGen/RDFGraph.h @@ -447,7 +447,7 @@ struct NodeAllocator { AllocatorTy MemPool; }; -using RegisterSet = std::set; +using RegisterSet = std::set; struct TargetOperandInfo { TargetOperandInfo(const TargetInstrInfo &tii) : TII(tii) {} diff --git a/llvm/include/llvm/CodeGen/RDFRegisters.h b/llvm/include/llvm/CodeGen/RDFRegisters.h index 4a9a4063c9e83..82027cad53bdb 100644 --- a/llvm/include/llvm/CodeGen/RDFRegisters.h +++ b/llvm/include/llvm/CodeGen/RDFRegisters.h @@ -199,6 +199,33 @@ struct PhysicalRegisterInfo { std::vector AliasInfos; }; +struct RegisterRefEqualTo { + constexpr RegisterRefEqualTo(const llvm::rdf::PhysicalRegisterInfo &pri) + : PRI(&pri) {} + + bool operator()(llvm::rdf::RegisterRef A, llvm::rdf::RegisterRef B) const { + return PRI->equal_to(A, B); + } + +private: + // Make it a pointer just in case. See comment in `RegisterRefLess` below. + const llvm::rdf::PhysicalRegisterInfo *PRI; +}; + +struct RegisterRefLess { + constexpr RegisterRefLess(const llvm::rdf::PhysicalRegisterInfo &pri) + : PRI(&pri) {} + + bool operator()(llvm::rdf::RegisterRef A, llvm::rdf::RegisterRef B) const { + return PRI->less(A, B); + } + +private: + // Make it a pointer because apparently some versions of MSVC use std::swap + // on the comparator object. 
+ const llvm::rdf::PhysicalRegisterInfo *PRI; +}; + struct RegisterAggr { RegisterAggr(const PhysicalRegisterInfo &pri) : Units(pri.getTRI().getNumRegUnits()), PRI(pri) {} @@ -334,18 +361,6 @@ template <> struct hash { } }; -template <> struct equal_to { - constexpr equal_to(const llvm::rdf::PhysicalRegisterInfo &pri) : PRI(&pri) {} - - bool operator()(llvm::rdf::RegisterRef A, llvm::rdf::RegisterRef B) const { - return PRI->equal_to(A, B); - } - -private: - // Make it a pointer just in case. See comment in `less` below. - const llvm::rdf::PhysicalRegisterInfo *PRI; -}; - template <> struct equal_to { bool operator()(const llvm::rdf::RegisterAggr &A, const llvm::rdf::RegisterAggr &B) const { @@ -353,23 +368,10 @@ template <> struct equal_to { } }; -template <> struct less { - constexpr less(const llvm::rdf::PhysicalRegisterInfo &pri) : PRI(&pri) {} - - bool operator()(llvm::rdf::RegisterRef A, llvm::rdf::RegisterRef B) const { - return PRI->less(A, B); - } - -private: - // Make it a pointer because apparently some versions of MSVC use std::swap - // on the std::less specialization. 
- const llvm::rdf::PhysicalRegisterInfo *PRI; -}; - } // namespace std namespace llvm::rdf { -using RegisterSet = std::set>; +using RegisterSet = std::set; } // namespace llvm::rdf #endif // LLVM_CODEGEN_RDFREGISTERS_H diff --git a/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h b/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h index c6d7c32c4ad95..bfcbf728d415c 100644 --- a/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h +++ b/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h @@ -48,6 +48,91 @@ class RootSignatureValidationError } }; +class OffsetAppendAfterOverflow : public ErrorInfo { +public: + static char ID; + dxil::ResourceClass Type; + uint32_t Register; + uint32_t Space; + + OffsetAppendAfterOverflow(dxil::ResourceClass Type, uint32_t Register, + uint32_t Space) + : Type(Type), Register(Register), Space(Space) {} + + void log(raw_ostream &OS) const override { + OS << "Range " << getResourceClassName(Type) << "(register=" << Register + << ", space=" << Space << ") " + << "cannot be appended after an unbounded range "; + } + + std::error_code convertToErrorCode() const override { + return llvm::inconvertibleErrorCode(); + } +}; + +class ShaderRegisterOverflowError + : public ErrorInfo { +public: + static char ID; + dxil::ResourceClass Type; + uint32_t Register; + uint32_t Space; + + ShaderRegisterOverflowError(dxil::ResourceClass Type, uint32_t Register, + uint32_t Space) + : Type(Type), Register(Register), Space(Space) {} + + void log(raw_ostream &OS) const override { + OS << "Overflow for shader register range: " << getResourceClassName(Type) + << "(register=" << Register << ", space=" << Space << ")."; + } + + std::error_code convertToErrorCode() const override { + return llvm::inconvertibleErrorCode(); + } +}; + +class OffsetOverflowError : public ErrorInfo { +public: + static char ID; + dxil::ResourceClass Type; + uint32_t Register; + uint32_t Space; + + OffsetOverflowError(dxil::ResourceClass Type, uint32_t Register, + 
uint32_t Space) + : Type(Type), Register(Register), Space(Space) {} + + void log(raw_ostream &OS) const override { + OS << "Offset overflow for descriptor range: " << getResourceClassName(Type) + << "(register=" << Register << ", space=" << Space << ")."; + } + + std::error_code convertToErrorCode() const override { + return llvm::inconvertibleErrorCode(); + } +}; + +class TableSamplerMixinError : public ErrorInfo { +public: + static char ID; + dxil::ResourceClass Type; + uint32_t Location; + + TableSamplerMixinError(dxil::ResourceClass Type, uint32_t Location) + : Type(Type), Location(Location) {} + + void log(raw_ostream &OS) const override { + OS << "Samplers cannot be mixed with other " + << "resource types in a descriptor table, " << getResourceClassName(Type) + << "(location=" << Location << ")"; + } + + std::error_code convertToErrorCode() const override { + return llvm::inconvertibleErrorCode(); + } +}; + class GenericRSMetadataError : public ErrorInfo { public: LLVM_ABI static char ID; diff --git a/llvm/include/llvm/Frontend/HLSL/RootSignatureValidations.h b/llvm/include/llvm/Frontend/HLSL/RootSignatureValidations.h index 49c5967aebd3e..4dd18111b0c9d 100644 --- a/llvm/include/llvm/Frontend/HLSL/RootSignatureValidations.h +++ b/llvm/include/llvm/Frontend/HLSL/RootSignatureValidations.h @@ -33,6 +33,7 @@ LLVM_ABI bool verifyRangeType(uint32_t Type); LLVM_ABI bool verifyDescriptorRangeFlag(uint32_t Version, dxil::ResourceClass Type, dxbc::DescriptorRangeFlags FlagsVal); +LLVM_ABI bool verifyStaticSamplerFlags(uint32_t Version, uint32_t FlagsNumber); LLVM_ABI bool verifyNumDescriptors(uint32_t NumDescriptors); LLVM_ABI bool verifyMipLODBias(float MipLODBias); LLVM_ABI bool verifyMaxAnisotropy(uint32_t MaxAnisotropy); diff --git a/llvm/include/llvm/Frontend/Offloading/OffloadWrapper.h b/llvm/include/llvm/Frontend/Offloading/OffloadWrapper.h index 6b9da06707261..24017492e30b2 100644 --- a/llvm/include/llvm/Frontend/Offloading/OffloadWrapper.h +++ 
b/llvm/include/llvm/Frontend/Offloading/OffloadWrapper.h @@ -13,6 +13,8 @@ #include "llvm/IR/Module.h" #include "llvm/Support/Compiler.h" +#include + namespace llvm { namespace offloading { using EntryArrayTy = std::pair; @@ -52,6 +54,24 @@ LLVM_ABI llvm::Error wrapHIPBinary(llvm::Module &M, llvm::ArrayRef Images, EntryArrayTy EntryArray, llvm::StringRef Suffix = "", bool EmitSurfacesAndTextures = true); + +struct SYCLJITOptions { + // Target/compiler specific options that are suggested to use to "compile" + // program at runtime. + std::string CompileOptions; + // Target/compiler specific options that are suggested to use to "link" + // program at runtime. + std::string LinkOptions; +}; + +/// Wraps OffloadBinaries in the given \p Buffers into the module \p M +/// as global symbols and registers the images with the SYCL Runtime. +/// \param Options Compiler and linker options to be encoded for the later +/// use by a runtime for JIT compilation. +LLVM_ABI llvm::Error +wrapSYCLBinaries(llvm::Module &M, llvm::ArrayRef Buffer, + SYCLJITOptions Options = SYCLJITOptions()); + } // namespace offloading } // namespace llvm diff --git a/llvm/include/llvm/Frontend/Offloading/Utility.h b/llvm/include/llvm/Frontend/Offloading/Utility.h index f8a2b1237b5e1..23e6702beb476 100644 --- a/llvm/include/llvm/Frontend/Offloading/Utility.h +++ b/llvm/include/llvm/Frontend/Offloading/Utility.h @@ -82,7 +82,8 @@ LLVM_ABI StructType *getEntryTy(Module &M); /// \param Data Extra data storage associated with the entry. /// \param SectionName The section this entry will be placed at. /// \param AuxAddr An extra pointer if needed. -LLVM_ABI void +/// \return The emitted global variable containing the offloading entry. 
+LLVM_ABI GlobalVariable * emitOffloadingEntry(Module &M, object::OffloadKind Kind, Constant *Addr, StringRef Name, uint64_t Size, uint32_t Flags, uint64_t Data, Constant *AuxAddr = nullptr, diff --git a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h index 1ade9ce0c3a7d..db781b58944bc 100644 --- a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h +++ b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h @@ -1268,6 +1268,15 @@ struct WriteT { using EmptyTrait = std::true_type; }; +// V6: [6.4.7] Looprange clause +template struct LoopRangeT { + using Begin = E; + using End = E; + + using TupleTrait = std::true_type; + std::tuple t; +}; + // --- template @@ -1300,8 +1309,8 @@ using TupleClausesT = DoacrossT, DynGroupprivateT, FromT, GrainsizeT, IfT, InitT, InReductionT, LastprivateT, LinearT, - MapT, NumTasksT, OrderT, - ReductionT, ScheduleT, + LoopRangeT, MapT, NumTasksT, + OrderT, ReductionT, ScheduleT, TaskReductionT, ToT>; template diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td index 4d9b8f8a6c51e..38f95a11bf85f 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMP.td +++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td @@ -284,6 +284,10 @@ def OMPC_Linear : Clause<[Spelling<"linear">]> { def OMPC_Link : Clause<[Spelling<"link">]> { let flangClass = "OmpObjectList"; } +def OMPC_LoopRange : Clause<[Spelling<"looprange">]> { + let clangClass = "OMPLoopRangeClause"; + let flangClass = "OmpLoopRangeClause"; +} def OMPC_Map : Clause<[Spelling<"map">]> { let clangClass = "OMPMapClause"; let flangClass = "OmpMapClause"; @@ -902,6 +906,11 @@ def OMP_Groupprivate : Directive<[Spelling<"groupprivate">]> { let category = CA_Declarative; let languages = [L_C, L_Fortran]; } +def OMP_Fuse : Directive<[Spelling<"fuse">]> { + let allowedOnceClauses = [VersionedClause]; + let association = AS_Block; + let category = CA_Executable; +} def OMP_Interchange : Directive<[Spelling<"interchange">]> { let 
allowedOnceClauses = [ VersionedClause, @@ -2087,9 +2096,11 @@ def OMP_TargetParallel : Directive<[Spelling<"target parallel">]> { let allowedOnceClauses = [ VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, ]; let leafConstructs = [OMP_Target, OMP_Parallel]; @@ -2117,12 +2128,14 @@ def OMP_TargetParallelDo : Directive<[Spelling<"target parallel do">]> { VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, + VersionedClause, ]; let leafConstructs = [OMP_Target, OMP_Parallel, OMP_Do]; let category = CA_Executable; @@ -2146,6 +2159,7 @@ def OMP_TargetParallelDoSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2156,6 +2170,7 @@ def OMP_TargetParallelDoSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2180,6 +2195,7 @@ def OMP_TargetParallelFor : Directive<[Spelling<"target parallel for">]> { VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2189,6 +2205,7 @@ def OMP_TargetParallelFor : Directive<[Spelling<"target parallel for">]> { VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, ]; @@ -2218,6 +2235,7 @@ def OMP_TargetParallelForSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2229,6 +2247,7 @@ def OMP_TargetParallelForSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2264,11 +2283,13 @@ def OMP_target_parallel_loop : Directive<[Spelling<"target parallel loop">]> { VersionedClause, VersionedClause, VersionedClause, + 
VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, ]; let leafConstructs = [OMP_Target, OMP_Parallel, OMP_loop]; @@ -2299,12 +2320,14 @@ def OMP_TargetSimd : Directive<[Spelling<"target simd">]> { VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, ]; @@ -2397,12 +2420,14 @@ def OMP_TargetTeamsDistributeParallelDo VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, ]; let leafConstructs = @@ -2436,6 +2461,7 @@ def OMP_TargetTeamsDistributeParallelDoSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2443,6 +2469,7 @@ def OMP_TargetTeamsDistributeParallelDoSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, ]; @@ -2468,6 +2495,7 @@ def OMP_TargetTeamsDistributeParallelFor VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2477,6 +2505,7 @@ def OMP_TargetTeamsDistributeParallelFor VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2508,6 +2537,7 @@ def OMP_TargetTeamsDistributeParallelForSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, @@ -2519,6 +2549,7 @@ def OMP_TargetTeamsDistributeParallelForSimd VersionedClause, VersionedClause, VersionedClause, + VersionedClause, VersionedClause, VersionedClause, VersionedClause, diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h 
b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h index f43ef932e965a..0a11617ea971c 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h +++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h @@ -1085,11 +1085,13 @@ class OpenMPIRBuilder { /// preheader of the loop. /// \param LoopType Information about type of loop worksharing. /// It corresponds to type of loop workshare OpenMP pragma. + /// \param NoLoop If true, no-loop code is generated. /// /// \returns Point where to insert code after the workshare construct. InsertPointTy applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, - omp::WorksharingLoopType LoopType); + omp::WorksharingLoopType LoopType, + bool NoLoop); /// Modifies the canonical loop to be a statically-scheduled workshare loop. /// @@ -1209,6 +1211,7 @@ class OpenMPIRBuilder { /// present. /// \param LoopType Information about type of loop worksharing. /// It corresponds to type of loop workshare OpenMP pragma. + /// \param NoLoop If true, no-loop code is generated. /// /// \returns Point where to insert code after the workshare construct. LLVM_ABI InsertPointOrErrorTy applyWorkshareLoop( @@ -1219,7 +1222,8 @@ class OpenMPIRBuilder { bool HasMonotonicModifier = false, bool HasNonmonotonicModifier = false, bool HasOrderedClause = false, omp::WorksharingLoopType LoopType = - omp::WorksharingLoopType::ForStaticLoop); + omp::WorksharingLoopType::ForStaticLoop, + bool NoLoop = false); /// Tile a loop nest. /// @@ -1402,7 +1406,7 @@ class OpenMPIRBuilder { /// any. LLVM_ABI static TargetRegionEntryInfo getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack, - StringRef ParentName = ""); + vfs::FileSystem &VFS, StringRef ParentName = ""); /// Enum class for the RedctionGen CallBack type to be used. 
enum class ReductionGenCBKind { Clang, MLIR }; diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td index dba44e7c3c506..96da698538314 100644 --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -297,46 +297,39 @@ def IIT_MMX : IIT_VT; def IIT_TOKEN : IIT_VT; def IIT_METADATA : IIT_VT; def IIT_EMPTYSTRUCT : IIT_VT; -def IIT_STRUCT2 : IIT_Base<21>; -def IIT_STRUCT3 : IIT_Base<22>; -def IIT_STRUCT4 : IIT_Base<23>; -def IIT_STRUCT5 : IIT_Base<24>; -def IIT_EXTEND_ARG : IIT_Base<25>; -def IIT_TRUNC_ARG : IIT_Base<26>; -def IIT_ANYPTR : IIT_Base<27>; -def IIT_V1 : IIT_Vec<1, 28>; -def IIT_VARARG : IIT_VT; -def IIT_ONE_NTH_ELTS_VEC_ARG : IIT_Base<30>; -def IIT_SAME_VEC_WIDTH_ARG : IIT_Base<31>; -def IIT_VEC_OF_ANYPTRS_TO_ELT : IIT_Base<34>; -def IIT_I128 : IIT_Int<128, 35>; -def IIT_V512 : IIT_Vec<512, 36>; -def IIT_V1024 : IIT_Vec<1024, 37>; -def IIT_STRUCT6 : IIT_Base<38>; -def IIT_STRUCT7 : IIT_Base<39>; -def IIT_STRUCT8 : IIT_Base<40>; -def IIT_F128 : IIT_VT; -def IIT_VEC_ELEMENT : IIT_Base<42>; -def IIT_SCALABLE_VEC : IIT_Base<43>; -def IIT_SUBDIVIDE2_ARG : IIT_Base<44>; -def IIT_SUBDIVIDE4_ARG : IIT_Base<45>; -def IIT_VEC_OF_BITCASTS_TO_INT : IIT_Base<46>; -def IIT_V128 : IIT_Vec<128, 47>; -def IIT_BF16 : IIT_VT; -def IIT_STRUCT9 : IIT_Base<49>; -def IIT_V256 : IIT_Vec<256, 50>; -def IIT_AMX : IIT_VT; -def IIT_PPCF128 : IIT_VT; -def IIT_V3 : IIT_Vec<3, 53>; -def IIT_EXTERNREF : IIT_VT; -def IIT_FUNCREF : IIT_VT; -def IIT_I2 : IIT_Int<2, 57>; -def IIT_I4 : IIT_Int<4, 58>; -def IIT_AARCH64_SVCOUNT : IIT_VT; -def IIT_V6 : IIT_Vec<6, 60>; -def IIT_V10 : IIT_Vec<10, 61>; -def IIT_V2048 : IIT_Vec<2048, 62>; -def IIT_V4096 : IIT_Vec<4096, 63>; +def IIT_STRUCT : IIT_Base<21>; +def IIT_EXTEND_ARG : IIT_Base<22>; +def IIT_TRUNC_ARG : IIT_Base<23>; +def IIT_ANYPTR : IIT_Base<24>; +def IIT_V1 : IIT_Vec<1, 25>; +def IIT_VARARG : IIT_VT; +def IIT_ONE_NTH_ELTS_VEC_ARG : IIT_Base<27>; +def IIT_SAME_VEC_WIDTH_ARG : 
IIT_Base<28>; +def IIT_VEC_OF_ANYPTRS_TO_ELT : IIT_Base<29>; +def IIT_I128 : IIT_Int<128, 30>; +def IIT_V512 : IIT_Vec<512, 31>; +def IIT_V1024 : IIT_Vec<1024, 32>; +def IIT_F128 : IIT_VT; +def IIT_VEC_ELEMENT : IIT_Base<34>; +def IIT_SCALABLE_VEC : IIT_Base<35>; +def IIT_SUBDIVIDE2_ARG : IIT_Base<36>; +def IIT_SUBDIVIDE4_ARG : IIT_Base<37>; +def IIT_VEC_OF_BITCASTS_TO_INT : IIT_Base<38>; +def IIT_V128 : IIT_Vec<128, 39>; +def IIT_BF16 : IIT_VT; +def IIT_V256 : IIT_Vec<256, 41>; +def IIT_AMX : IIT_VT; +def IIT_PPCF128 : IIT_VT; +def IIT_V3 : IIT_Vec<3, 44>; +def IIT_EXTERNREF : IIT_VT; +def IIT_FUNCREF : IIT_VT; +def IIT_I2 : IIT_Int<2, 47>; +def IIT_I4 : IIT_Int<4, 48>; +def IIT_AARCH64_SVCOUNT : IIT_VT; +def IIT_V6 : IIT_Vec<6, 50>; +def IIT_V10 : IIT_Vec<10, 51>; +def IIT_V2048 : IIT_Vec<2048, 52>; +def IIT_V4096 : IIT_Vec<4096, 53>; } defvar IIT_all_FixedTypes = !filter(iit, IIT_all, @@ -345,19 +338,6 @@ defvar IIT_all_FixedTypes = !filter(iit, IIT_all, defvar IIT_all_VectorTypes = !filter(iit, IIT_all, !isa(iit)); -defvar IIT_RetNumbers = [ - [IIT_Done.Number], - [], - [IIT_STRUCT2.Number], - [IIT_STRUCT3.Number], - [IIT_STRUCT4.Number], - [IIT_STRUCT5.Number], - [IIT_STRUCT6.Number], - [IIT_STRUCT7.Number], - [IIT_STRUCT8.Number], - [IIT_STRUCT9.Number], -]; - //===----------------------------------------------------------------------===// // Types used by intrinsics. 
//===----------------------------------------------------------------------===// @@ -663,7 +643,10 @@ class TypeInfoGen< !if(!isa(ty), ACTys[MappingRIdxs[ty.Number]], ty)); list TypeSig = !listflatten(!listconcat( - [IIT_RetNumbers[!size(RetTypes)]], + [!cond( + !eq(!size(RetTypes), 0): [IIT_Done.Number], + !eq(!size(RetTypes), 1): [], + true: [IIT_STRUCT.Number, !sub(!size(RetTypes), 2)])], !foreach(i, !range(AllTypes), !foreach(a, AllTypes[i].Sig, ResolveArgCode< @@ -977,8 +960,12 @@ def int_instrprof_mcdc_tvbitmap_update : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i32_ty, llvm_ptr_ty]>; -def int_call_preallocated_setup : DefaultAttrsIntrinsic<[llvm_token_ty], [llvm_i32_ty]>; -def int_call_preallocated_arg : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_i32_ty]>; +def int_call_preallocated_setup + : DefaultAttrsIntrinsic<[llvm_token_ty], [llvm_i32_ty], + [ImmArg>]>; +def int_call_preallocated_arg + : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_i32_ty], + [ImmArg>]>; def int_call_preallocated_teardown : DefaultAttrsIntrinsic<[], [llvm_token_ty]>; // This intrinsic is intentionally undocumented and users shouldn't call it; diff --git a/llvm/include/llvm/MC/DXContainerRootSignature.h b/llvm/include/llvm/MC/DXContainerRootSignature.h index 54677ef70244f..2b08b2439d2c0 100644 --- a/llvm/include/llvm/MC/DXContainerRootSignature.h +++ b/llvm/include/llvm/MC/DXContainerRootSignature.h @@ -74,6 +74,8 @@ struct StaticSampler { uint32_t ShaderRegister; uint32_t RegisterSpace; dxbc::ShaderVisibility ShaderVisibility; + // Version 3 onwards: + uint32_t Flags = 0; }; struct RootParametersContainer { diff --git a/llvm/include/llvm/Object/DXContainer.h b/llvm/include/llvm/Object/DXContainer.h index 9bc1918852335..5a5a4dbaae2ad 100644 --- a/llvm/include/llvm/Object/DXContainer.h +++ b/llvm/include/llvm/Object/DXContainer.h @@ -228,11 +228,11 @@ class RootSignature { uint32_t Flags; ViewArray ParametersHeaders; StringRef PartData; - ViewArray 
StaticSamplers; + ViewArray StaticSamplers; using param_header_iterator = ViewArray::iterator; - using samplers_iterator = ViewArray::iterator; + using samplers_iterator = ViewArray::iterator; public: RootSignature(StringRef PD) : PartData(PD) {} diff --git a/llvm/include/llvm/Object/OffloadBinary.h b/llvm/include/llvm/Object/OffloadBinary.h index b5c845fa8eb70..ac2dbf60e2aec 100644 --- a/llvm/include/llvm/Object/OffloadBinary.h +++ b/llvm/include/llvm/Object/OffloadBinary.h @@ -48,6 +48,7 @@ enum ImageKind : uint16_t { IMG_Cubin, IMG_Fatbinary, IMG_PTX, + IMG_SPIRV, IMG_LAST, }; @@ -70,9 +71,9 @@ class OffloadBinary : public Binary { /// The offloading metadata that will be serialized to a memory buffer. struct OffloadingImage { - ImageKind TheImageKind; - OffloadKind TheOffloadKind; - uint32_t Flags; + ImageKind TheImageKind = ImageKind::IMG_None; + OffloadKind TheOffloadKind = OffloadKind::OFK_None; + uint32_t Flags = 0; MapVector StringData; std::unique_ptr Image; }; diff --git a/llvm/include/llvm/ObjectYAML/DXContainerYAML.h b/llvm/include/llvm/ObjectYAML/DXContainerYAML.h index 62bfee7693db1..b5b110d0f59a1 100644 --- a/llvm/include/llvm/ObjectYAML/DXContainerYAML.h +++ b/llvm/include/llvm/ObjectYAML/DXContainerYAML.h @@ -178,6 +178,11 @@ struct StaticSamplerYamlDesc { uint32_t ShaderRegister; uint32_t RegisterSpace; dxbc::ShaderVisibility ShaderVisibility; + + LLVM_ABI uint32_t getEncodedFlags() const; + +#define STATIC_SAMPLER_FLAG(Num, Enum, Flag) bool Enum = false; +#include "llvm/BinaryFormat/DXContainerConstants.def" }; struct RootSignatureYamlDesc { diff --git a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h index 7d1a85ba528fc..e09958160b9a0 100644 --- a/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h +++ b/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h @@ -1215,19 +1215,19 @@ namespace accessors { /// Return the structural hash associated with the function. 
template uint64_t getFuncHash(const FuncRecordTy *Record) { - return support::endian::byte_swap(Record->FuncHash); + return support::endian::byte_swap(Record->FuncHash, Endian); } /// Return the coverage map data size for the function. template uint64_t getDataSize(const FuncRecordTy *Record) { - return support::endian::byte_swap(Record->DataSize); + return support::endian::byte_swap(Record->DataSize, Endian); } /// Return the function lookup key. The value is considered opaque. template uint64_t getFuncNameRef(const FuncRecordTy *Record) { - return support::endian::byte_swap(Record->NameRef); + return support::endian::byte_swap(Record->NameRef, Endian); } /// Return the PGO name of the function. Used for formats in which the name is @@ -1280,14 +1280,14 @@ struct CovMapFunctionRecordV1 { /// Return function lookup key. The value is consider opaque. template IntPtrT getFuncNameRef() const { - return support::endian::byte_swap(NamePtr); + return support::endian::byte_swap(NamePtr, Endian); } /// Return the PGO name of the function. template Error getFuncName(InstrProfSymtab &ProfileNames, StringRef &FuncName) const { IntPtrT NameRef = getFuncNameRef(); - uint32_t NameS = support::endian::byte_swap(NameSize); + uint32_t NameS = support::endian::byte_swap(NameSize, Endian); FuncName = ProfileNames.getFuncName(NameRef, NameS); if (NameS && FuncName.empty()) return make_error(coveragemap_error::malformed, @@ -1385,7 +1385,7 @@ struct CovMapFunctionRecordV3 { /// Get the filename set reference. template uint64_t getFilenamesRef() const { - return support::endian::byte_swap(FilenamesRef); + return support::endian::byte_swap(FilenamesRef, Endian); } /// Read the inline coverage mapping. 
Ignore the buffer parameter, it is for @@ -1416,19 +1416,19 @@ struct CovMapHeader { #define COVMAP_HEADER(Type, LLVMType, Name, Init) Type Name; #include "llvm/ProfileData/InstrProfData.inc" template uint32_t getNRecords() const { - return support::endian::byte_swap(NRecords); + return support::endian::byte_swap(NRecords, Endian); } template uint32_t getFilenamesSize() const { - return support::endian::byte_swap(FilenamesSize); + return support::endian::byte_swap(FilenamesSize, Endian); } template uint32_t getCoverageSize() const { - return support::endian::byte_swap(CoverageSize); + return support::endian::byte_swap(CoverageSize, Endian); } template uint32_t getVersion() const { - return support::endian::byte_swap(Version); + return support::endian::byte_swap(Version, Endian); } }; diff --git a/llvm/include/llvm/Support/Endian.h b/llvm/include/llvm/Support/Endian.h index 7eb1d7e8dfe7f..51db225841dbe 100644 --- a/llvm/include/llvm/Support/Endian.h +++ b/llvm/include/llvm/Support/Endian.h @@ -49,7 +49,9 @@ template /// Swap the bytes of value to match the given endianness. template -[[nodiscard]] inline value_type byte_swap(value_type value) { +[[nodiscard]] +LLVM_DEPRECATED("Pass endian as a function argument instead", + "byte_swap") inline value_type byte_swap(value_type value) { return byte_swap(value, endian); } @@ -66,7 +68,9 @@ template } template -[[nodiscard]] inline value_type read(const void *memory) { +[[nodiscard]] LLVM_DEPRECATED("Pass endian as a function argument instead", + "read") inline value_type + read(const void *memory) { return read(memory, endian); } @@ -127,7 +131,7 @@ template uint64_t startBit) { assert(startBit < 8); if (startBit == 0) - return read(memory); + return read(memory, endian); else { // Read two values and compose the result from them. 
value_type val[2]; @@ -135,8 +139,8 @@ template LLVM_ASSUME_ALIGNED( memory, (detail::PickAlignment::value)), sizeof(value_type) * 2); - val[0] = byte_swap(val[0]); - val[1] = byte_swap(val[1]); + val[0] = byte_swap(val[0], endian); + val[1] = byte_swap(val[1], endian); // Shift bits from the lower value into place. make_unsigned_t lowerVal = val[0] >> startBit; @@ -170,8 +174,8 @@ inline void writeAtBitAlignment(void *memory, value_type value, LLVM_ASSUME_ALIGNED( memory, (detail::PickAlignment::value)), sizeof(value_type) * 2); - val[0] = byte_swap(val[0]); - val[1] = byte_swap(val[1]); + val[0] = byte_swap(val[0], endian); + val[1] = byte_swap(val[1], endian); // Mask off any existing bits in the upper part of the lower value that // we want to replace. @@ -199,8 +203,8 @@ inline void writeAtBitAlignment(void *memory, value_type value, val[1] |= upperVal; // Finally, rewrite values. - val[0] = byte_swap(val[0]); - val[1] = byte_swap(val[1]); + val[0] = byte_swap(val[0], endian); + val[1] = byte_swap(val[1], endian); memcpy(LLVM_ASSUME_ALIGNED( memory, (detail::PickAlignment::value)), &val[0], sizeof(value_type) * 2); @@ -223,8 +227,8 @@ struct packed_endian_specific_integral { explicit packed_endian_specific_integral(value_type val) { *this = val; } value_type value() const { - return endian::read( - (const void*)Value.buffer); + return endian::read((const void *)Value.buffer, + endian); } operator value_type() const { return value(); } @@ -263,7 +267,7 @@ struct packed_endian_specific_integral { explicit ref(void *Ptr) : Ptr(Ptr) {} operator value_type() const { - return endian::read(Ptr); + return endian::read(Ptr, endian); } void operator=(value_type NewValue) { diff --git a/llvm/include/llvm/Support/FileCollector.h b/llvm/include/llvm/Support/FileCollector.h index b00bf3174e654..9cc6776b948ba 100644 --- a/llvm/include/llvm/Support/FileCollector.h +++ b/llvm/include/llvm/Support/FileCollector.h @@ -81,19 +81,25 @@ class LLVM_ABI FileCollector : public 
FileCollectorBase { /// Canonicalize a pair of virtual and real paths. LLVM_ABI PathStorage canonicalize(StringRef SrcPath); + explicit PathCanonicalizer(IntrusiveRefCntPtr VFS) + : VFS(std::move(VFS)) {} + private: /// Replace with a (mostly) real path, or don't modify. Resolves symlinks /// in the directory, using \a CachedDirs to avoid redundant lookups, but /// leaves the filename as a possible symlink. void updateWithRealPath(SmallVectorImpl &Path); + IntrusiveRefCntPtr VFS; + StringMap CachedDirs; }; /// \p Root is the directory where collected files are will be stored. /// \p OverlayRoot is VFS mapping root. /// \p Root directory gets created in copyFiles unless it already exists. - FileCollector(std::string Root, std::string OverlayRoot); + FileCollector(std::string Root, std::string OverlayRoot, + IntrusiveRefCntPtr VFS); /// Write the yaml mapping (for the VFS) to the given file. std::error_code writeMapping(StringRef MappingFile); diff --git a/llvm/include/llvm/Support/ProgramStack.h b/llvm/include/llvm/Support/ProgramStack.h index 0dd8235b90c06..13729a2990588 100644 --- a/llvm/include/llvm/Support/ProgramStack.h +++ b/llvm/include/llvm/Support/ProgramStack.h @@ -46,17 +46,15 @@ LLVM_ABI unsigned getDefaultStackSize(); LLVM_ABI void runOnNewStack(unsigned StackSize, function_ref Fn); template -std::enable_if_t, R> -runOnNewStack(unsigned StackSize, function_ref Fn, Ts &&...Args) { - std::optional Ret; - runOnNewStack(StackSize, [&]() { Ret = Fn(std::forward(Args)...); }); - return std::move(*Ret); -} - -template -void runOnNewStack(unsigned StackSize, function_ref Fn, +auto runOnNewStack(unsigned StackSize, function_ref Fn, Ts &&...Args) { - runOnNewStack(StackSize, [&]() { Fn(std::forward(Args)...); }); + if constexpr (std::is_same_v) { + runOnNewStack(StackSize, [&]() { Fn(std::forward(Args)...); }); + } else { + std::optional Ret; + runOnNewStack(StackSize, [&]() { Ret = Fn(std::forward(Args)...); }); + return std::move(*Ret); + } } } // namespace 
llvm diff --git a/llvm/include/llvm/Support/SMLoc.h b/llvm/include/llvm/Support/SMLoc.h index d7dde81ce0be7..c80969b1d83dc 100644 --- a/llvm/include/llvm/Support/SMLoc.h +++ b/llvm/include/llvm/Support/SMLoc.h @@ -28,8 +28,8 @@ class SMLoc { constexpr bool isValid() const { return Ptr != nullptr; } - constexpr bool operator==(const SMLoc &RHS) const { return RHS.Ptr == Ptr; } - constexpr bool operator!=(const SMLoc &RHS) const { return RHS.Ptr != Ptr; } + constexpr bool operator==(SMLoc RHS) const { return RHS.Ptr == Ptr; } + constexpr bool operator!=(SMLoc RHS) const { return RHS.Ptr != Ptr; } constexpr const char *getPointer() const { return Ptr; } diff --git a/llvm/include/llvm/Support/TrailingObjects.h b/llvm/include/llvm/Support/TrailingObjects.h index d7211a930ae49..3eb7c0bd1f379 100644 --- a/llvm/include/llvm/Support/TrailingObjects.h +++ b/llvm/include/llvm/Support/TrailingObjects.h @@ -57,25 +57,9 @@ namespace llvm { namespace trailing_objects_internal { -/// Helper template to calculate the max alignment requirement for a set of -/// objects. -template class AlignmentCalcHelper { -private: - enum { - FirstAlignment = alignof(First), - RestAlignment = AlignmentCalcHelper::Alignment, - }; -public: - enum { - Alignment = FirstAlignment > RestAlignment ? FirstAlignment : RestAlignment - }; -}; - -template class AlignmentCalcHelper { -public: - enum { Alignment = alignof(First) }; -}; +template +inline constexpr size_t MaxAlignment = std::max({alignof(T)...}); /// The base class for TrailingObjects* classes. class TrailingObjectsBase { @@ -209,11 +193,10 @@ class alignas(Align) TrailingObjectsImpl /// See the file comment for details on the usage of the /// TrailingObjects type. 
template -class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl< - trailing_objects_internal::AlignmentCalcHelper< - TrailingTys...>::Alignment, - BaseTy, TrailingObjects, - BaseTy, TrailingTys...> { +class TrailingObjects + : private trailing_objects_internal::TrailingObjectsImpl< + trailing_objects_internal::MaxAlignment, BaseTy, + TrailingObjects, BaseTy, TrailingTys...> { template friend class trailing_objects_internal::TrailingObjectsImpl; @@ -221,8 +204,8 @@ class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl< template class Foo {}; typedef trailing_objects_internal::TrailingObjectsImpl< - trailing_objects_internal::AlignmentCalcHelper::Alignment, - BaseTy, TrailingObjects, BaseTy, TrailingTys...> + trailing_objects_internal::MaxAlignment, BaseTy, + TrailingObjects, BaseTy, TrailingTys...> ParentType; using TrailingObjectsBase = trailing_objects_internal::TrailingObjectsBase; diff --git a/llvm/include/llvm/TableGen/Record.h b/llvm/include/llvm/TableGen/Record.h index d4fa1e5d65749..cb2721aba4f25 100644 --- a/llvm/include/llvm/TableGen/Record.h +++ b/llvm/include/llvm/TableGen/Record.h @@ -1577,7 +1577,7 @@ class RecordVal { } /// Get the source location of the point where the field was defined. - const SMLoc &getLoc() const { return Loc; } + SMLoc getLoc() const { return Loc; } /// Is this a field where nonconcrete values are okay? bool isNonconcreteOK() const { diff --git a/llvm/include/llvm/Transforms/Scalar/SROA.h b/llvm/include/llvm/Transforms/Scalar/SROA.h index c03cdf48fb1c6..8bb65bf7225e0 100644 --- a/llvm/include/llvm/Transforms/Scalar/SROA.h +++ b/llvm/include/llvm/Transforms/Scalar/SROA.h @@ -1,4 +1,4 @@ -//===- SROA.h - Scalar Replacement Of Aggregates ----------------*- C++ -*-===// +//===- SROA.h - Scalar Replacement Of Aggregates ----------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
// See https://llvm.org/LICENSE.txt for license information. diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp index 512ae415d1c3b..87fae92977cd2 100644 --- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp +++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp @@ -2090,12 +2090,14 @@ MemoryDepChecker::getDependenceDistanceStrideAndSize( return MemoryDepChecker::Dependence::Unknown; } + TypeSize AStoreSz = DL.getTypeStoreSize(ATy); + TypeSize BStoreSz = DL.getTypeStoreSize(BTy); + + // If store sizes are not the same, set TypeByteSize to zero, so we can check + // it in the caller isDependent. uint64_t ASz = DL.getTypeAllocSize(ATy); uint64_t BSz = DL.getTypeAllocSize(BTy); - - // Both the source and sink sizes are neeeded in dependence checks, depending - // on the use. - std::pair TypeByteSize(ASz, BSz); + uint64_t TypeByteSize = (AStoreSz == BStoreSz) ? BSz : 0; uint64_t StrideAScaled = std::abs(StrideAPtrInt) * ASz; uint64_t StrideBScaled = std::abs(StrideBPtrInt) * BSz; @@ -2117,24 +2119,8 @@ MemoryDepChecker::getDependenceDistanceStrideAndSize( return Dependence::Unknown; } - // When the distance is possibly zero, we're reading/writing the same memory - // location: if the store sizes are not equal, fail with an unknown - // dependence. - TypeSize AStoreSz = DL.getTypeStoreSize(ATy); - TypeSize BStoreSz = DL.getTypeStoreSize(BTy); - if (AStoreSz != BStoreSz && SE.isKnownNonPositive(Dist) && - SE.isKnownNonNegative(Dist)) { - LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence distance with " - "different type sizes\n"); - return Dependence::Unknown; - } - - // TODO: Remove this. 
- bool HasSameSize = AStoreSz == BStoreSz; - return DepDistanceStrideAndSizeInfo(Dist, MaxStride, CommonStride, - TypeByteSize, HasSameSize, AIsWrite, - BIsWrite); + TypeByteSize, AIsWrite, BIsWrite); } MemoryDepChecker::Dependence::DepType @@ -2166,8 +2152,9 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, return std::get(Res); } - auto &[Dist, MaxStride, CommonStride, TypeByteSize, HasSameSize, AIsWrite, - BIsWrite] = std::get(Res); + auto &[Dist, MaxStride, CommonStride, TypeByteSize, AIsWrite, BIsWrite] = + std::get(Res); + bool HasSameSize = TypeByteSize > 0; ScalarEvolution &SE = *PSE.getSE(); auto &DL = InnermostLoop->getHeader()->getDataLayout(); @@ -2193,8 +2180,7 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, // If the distance between accesses and their strides are known constants, // check whether the accesses interlace each other. if (ConstDist > 0 && CommonStride && CommonStride > 1 && HasSameSize && - areStridedAccessesIndependent(ConstDist, *CommonStride, - TypeByteSize.first)) { + areStridedAccessesIndependent(ConstDist, *CommonStride, TypeByteSize)) { LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n"); return Dependence::NoDep; } @@ -2208,9 +2194,13 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, // Negative distances are not plausible dependencies. if (SE.isKnownNonPositive(Dist)) { if (SE.isKnownNonNegative(Dist)) { - // Write to the same location with the same size. - assert(HasSameSize && "Accesses must have the same size"); - return Dependence::Forward; + if (HasSameSize) { + // Write to the same location with the same size. 
+ return Dependence::Forward; + } + LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but " + "different type sizes\n"); + return Dependence::Unknown; } bool IsTrueDataDependence = (AIsWrite && !BIsWrite); @@ -2228,7 +2218,7 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, : Dependence::Unknown; } if (!HasSameSize || - couldPreventStoreLoadForward(ConstDist, TypeByteSize.first)) { + couldPreventStoreLoadForward(ConstDist, TypeByteSize)) { LLVM_DEBUG( dbgs() << "LAA: Forward but may prevent st->ld forwarding\n"); return Dependence::ForwardButPreventsForwarding; @@ -2294,8 +2284,7 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, // We know that Dist is positive, but it may not be constant. Use the signed // minimum for computations below, as this ensures we compute the closest // possible dependence distance. - uint64_t MinDistanceNeeded = - MaxStride * (MinNumIter - 1) + TypeByteSize.first; + uint64_t MinDistanceNeeded = MaxStride * (MinNumIter - 1) + TypeByteSize; if (MinDistanceNeeded > static_cast(MinDistance)) { if (!ConstDist) { // For non-constant distances, we checked the lower bound of the @@ -2323,15 +2312,14 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx, bool IsTrueDataDependence = (!AIsWrite && BIsWrite); if (IsTrueDataDependence && EnableForwardingConflictDetection && ConstDist && - couldPreventStoreLoadForward(MinDistance, TypeByteSize.first, - *CommonStride)) + couldPreventStoreLoadForward(MinDistance, TypeByteSize, *CommonStride)) return Dependence::BackwardVectorizableButPreventsForwarding; uint64_t MaxVF = MinDepDistBytes / MaxStride; LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance << " with max VF = " << MaxVF << '\n'); - uint64_t MaxVFInBits = MaxVF * TypeByteSize.first * 8; + uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8; if (!ConstDist && MaxVFInBits < MaxTargetVectorWidthInBits) { // For non-constant distances, we checked the lower bound of 
the dependence // distance and the distance may be larger at runtime (and safe for diff --git a/llvm/lib/BinaryFormat/DXContainer.cpp b/llvm/lib/BinaryFormat/DXContainer.cpp index 36d10d0b63078..c06a3e34653f0 100644 --- a/llvm/lib/BinaryFormat/DXContainer.cpp +++ b/llvm/lib/BinaryFormat/DXContainer.cpp @@ -89,6 +89,15 @@ ArrayRef> dxbc::getDescriptorRangeFlags() { return ArrayRef(DescriptorRangeFlagNames); } +static const EnumEntry StaticSamplerFlagNames[] = { +#define STATIC_SAMPLER_FLAG(Val, Enum, Flag) {#Enum, StaticSamplerFlags::Enum}, +#include "llvm/BinaryFormat/DXContainerConstants.def" +}; + +ArrayRef> dxbc::getStaticSamplerFlags() { + return ArrayRef(StaticSamplerFlagNames); +} + #define SHADER_VISIBILITY(Val, Enum) {#Enum, ShaderVisibility::Enum}, static const EnumEntry ShaderVisibilityValues[] = { diff --git a/llvm/lib/CGData/CodeGenDataReader.cpp b/llvm/lib/CGData/CodeGenDataReader.cpp index fc59be8df525a..3fd8cfe1a8762 100644 --- a/llvm/lib/CGData/CodeGenDataReader.cpp +++ b/llvm/lib/CGData/CodeGenDataReader.cpp @@ -169,8 +169,8 @@ bool IndexedCodeGenDataReader::hasFormat(const MemoryBuffer &DataBuffer) { if (DataBuffer.getBufferSize() < sizeof(IndexedCGData::Magic)) return false; - uint64_t Magic = endian::read( - DataBuffer.getBufferStart()); + uint64_t Magic = endian::read(DataBuffer.getBufferStart(), + llvm::endianness::little); // Verify that it's magical. 
return Magic == IndexedCGData::Magic; } diff --git a/llvm/lib/CGData/CodeGenDataWriter.cpp b/llvm/lib/CGData/CodeGenDataWriter.cpp index 14a8558ba63b7..a2bbceebd0317 100644 --- a/llvm/lib/CGData/CodeGenDataWriter.cpp +++ b/llvm/lib/CGData/CodeGenDataWriter.cpp @@ -40,7 +40,7 @@ void CGDataOStream::patch(ArrayRef P) { for (const auto &K : P) { for (size_t I = 0; I < K.D.size(); ++I) { uint64_t Bytes = - endian::byte_swap(K.D[I]); + endian::byte_swap(K.D[I], llvm::endianness::little); Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t), reinterpret_cast(&Bytes), sizeof(uint64_t)); } @@ -52,7 +52,7 @@ void CGDataOStream::patch(ArrayRef P) { for (const auto &K : P) { for (size_t I = 0; I < K.D.size(); ++I) { uint64_t Bytes = - endian::byte_swap(K.D[I]); + endian::byte_swap(K.D[I], llvm::endianness::little); VOStream.pwrite(reinterpret_cast(&Bytes), sizeof(uint64_t), K.Pos + I * sizeof(uint64_t)); } diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp index 98c56f739ad4e..0c2b74c907d2a 100644 --- a/llvm/lib/CodeGen/InlineSpiller.cpp +++ b/llvm/lib/CodeGen/InlineSpiller.cpp @@ -642,8 +642,12 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) { SmallVector, 8> Ops; VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops); - if (!RI.Reads) + // Defs without reads will be deleted if unused after remat is + // completed for other users of the virtual register. + if (!RI.Reads) { + LLVM_DEBUG(dbgs() << "\tskipping remat of def " << MI); return false; + } SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true); VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex()); @@ -657,8 +661,13 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) { return true; } - if (SnippetCopies.count(&MI)) + // Snippets copies are ignored for remat, and will be deleted if they + // don't feed a live user after rematerialization completes. 
+ if (SnippetCopies.count(&MI)) { + LLVM_DEBUG(dbgs() << "\tskipping remat snippet copy for " << UseIdx << '\t' + << MI); return false; + } LiveInterval &OrigLI = LIS.getInterval(Original); VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx); diff --git a/llvm/lib/CodeGen/LiveInterval.cpp b/llvm/lib/CodeGen/LiveInterval.cpp index b682998c329bc..299db85233c2d 100644 --- a/llvm/lib/CodeGen/LiveInterval.cpp +++ b/llvm/lib/CodeGen/LiveInterval.cpp @@ -996,6 +996,17 @@ LLVM_DUMP_METHOD void LiveRange::Segment::dump() const { } #endif +void VNInfo::print(raw_ostream &OS) const { + OS << id << '@'; + if (isUnused()) { + OS << 'x'; + } else { + OS << def; + if (isPHIDef()) + OS << "-phi"; + } +} + void LiveRange::print(raw_ostream &OS) const { if (empty()) OS << "EMPTY"; @@ -1013,15 +1024,10 @@ void LiveRange::print(raw_ostream &OS) const { for (const_vni_iterator i = vni_begin(), e = vni_end(); i != e; ++i, ++vnum) { const VNInfo *vni = *i; - if (vnum) OS << ' '; - OS << vnum << '@'; - if (vni->isUnused()) { - OS << 'x'; - } else { - OS << vni->def; - if (vni->isPHIDef()) - OS << "-phi"; - } + if (vnum) + OS << ' '; + OS << *vni; + assert(vnum == vni->id && "Bad VNInfo"); } } } @@ -1041,9 +1047,9 @@ void LiveInterval::print(raw_ostream &OS) const { } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) -LLVM_DUMP_METHOD void LiveRange::dump() const { - dbgs() << *this << '\n'; -} +LLVM_DUMP_METHOD void VNInfo::dump() const { dbgs() << *this << '\n'; } + +LLVM_DUMP_METHOD void LiveRange::dump() const { dbgs() << *this << '\n'; } LLVM_DUMP_METHOD void LiveInterval::SubRange::dump() const { dbgs() << *this << '\n'; diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp index bc1df26db2684..1cb57a4fa4258 100644 --- a/llvm/lib/CodeGen/MachineBasicBlock.cpp +++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp @@ -1180,7 +1180,7 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge( MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge( 
MachineBasicBlock *Succ, const SplitCriticalEdgeAnalyses &Analyses, std::vector> *LiveInSets, MachineDomTreeUpdater *MDTU) { - if (!canSplitCriticalEdge(Succ)) + if (!canSplitCriticalEdge(Succ, Analyses.MLI)) return nullptr; MachineFunction *MF = getParent(); @@ -1408,8 +1408,8 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge( return NMBB; } -bool MachineBasicBlock::canSplitCriticalEdge( - const MachineBasicBlock *Succ) const { +bool MachineBasicBlock::canSplitCriticalEdge(const MachineBasicBlock *Succ, + const MachineLoopInfo *MLI) const { // Splitting the critical edge to a landing pad block is non-trivial. Don't do // it in this generic function. if (Succ->isEHPad()) @@ -1423,8 +1423,17 @@ bool MachineBasicBlock::canSplitCriticalEdge( const MachineFunction *MF = getParent(); // Performance might be harmed on HW that implements branching using exec mask // where both sides of the branches are always executed. - if (MF->getTarget().requiresStructuredCFG()) + + if (MF->getTarget().requiresStructuredCFG()) { + // If `Succ` is a loop header, splitting the critical edge will not + // break structured CFG. + if (MLI) { + const MachineLoop *L = MLI->getLoopFor(Succ); + return L && L->getHeader() == Succ; + } + return false; + } // Do we have an Indirect jump with a jumptable that we can rewrite? int JTI = findJumpTableIndex(*this); diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp index 7bfc9dae59fcf..fb3e6482bb096 100644 --- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp +++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp @@ -1004,9 +1004,8 @@ bool PeepholeOptimizer::findNextSource(const TargetRegisterClass *DefRC, // Thus, instead of maintaining untested code, we will revisit that if // that changes at some point. 
Register Reg = RegSubReg.Reg; - SmallVector SrcToLook; RegSubRegPair CurSrcPair = RegSubReg; - SrcToLook.push_back(CurSrcPair); + SmallVector SrcToLook = {CurSrcPair}; unsigned PHICount = 0; do { diff --git a/llvm/lib/CodeGen/RDFLiveness.cpp b/llvm/lib/CodeGen/RDFLiveness.cpp index 318422b46e811..2e1cf499eab41 100644 --- a/llvm/lib/CodeGen/RDFLiveness.cpp +++ b/llvm/lib/CodeGen/RDFLiveness.cpp @@ -652,8 +652,9 @@ void Liveness::computePhiInfo() { // defs, cache the result of subtracting these defs from a given register // ref. using RefHash = std::hash; - using RefEqual = std::equal_to; - using SubMap = std::unordered_map; + using RefEqual = RegisterRefEqualTo; + using SubMap = + std::unordered_map; std::unordered_map Subs; auto ClearIn = [](RegisterRef RR, const RegisterAggr &Mid, SubMap &SM) { if (Mid.empty()) @@ -868,7 +869,7 @@ void Liveness::computeLiveIns() { std::vector LV; for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins()) LV.push_back(RegisterRef(LI.PhysReg, LI.LaneMask)); - llvm::sort(LV, std::less(PRI)); + llvm::sort(LV, RegisterRefLess(PRI)); dbgs() << printMBBReference(B) << "\t rec = {"; for (auto I : LV) dbgs() << ' ' << Print(I, DFG); @@ -878,7 +879,7 @@ void Liveness::computeLiveIns() { LV.clear(); for (RegisterRef RR : LiveMap[&B].refs()) LV.push_back(RR); - llvm::sort(LV, std::less(PRI)); + llvm::sort(LV, RegisterRefLess(PRI)); dbgs() << "\tcomp = {"; for (auto I : LV) dbgs() << ' ' << Print(I, DFG); diff --git a/llvm/lib/CodeGen/RegAllocGreedy.cpp b/llvm/lib/CodeGen/RegAllocGreedy.cpp index d004815d2c17a..8e6cf3e6b51b3 100644 --- a/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -1383,21 +1383,37 @@ bool RAGreedy::trySplitAroundHintReg(MCPhysReg Hint, // Compute the cost of assigning a non Hint physical register to VirtReg. // We define it as the total frequency of broken COPY instructions to/from // Hint register, and after split, they can be deleted. 
- for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) { - if (!TII->isFullCopyInstr(Instr)) + + // FIXME: This is miscounting the costs with subregisters. In particular, this + // should support recognizing SplitKit formed copy bundles instead of direct + // copy instructions, which will appear in the same block. + for (const MachineOperand &Opnd : MRI->reg_nodbg_operands(Reg)) { + const MachineInstr &Instr = *Opnd.getParent(); + if (!Instr.isCopy() || Opnd.isImplicit()) continue; - Register OtherReg = Instr.getOperand(1).getReg(); - if (OtherReg == Reg) { - OtherReg = Instr.getOperand(0).getReg(); - if (OtherReg == Reg) - continue; - // Check if VirtReg interferes with OtherReg after this COPY instruction. - if (VirtReg.liveAt(LIS->getInstructionIndex(Instr).getRegSlot())) - continue; - } + + // Look for the other end of the copy. + const bool IsDef = Opnd.isDef(); + const MachineOperand &OtherOpnd = Instr.getOperand(IsDef); + Register OtherReg = OtherOpnd.getReg(); + assert(Reg == Opnd.getReg()); + if (OtherReg == Reg) + continue; + + unsigned SubReg = Opnd.getSubReg(); + unsigned OtherSubReg = OtherOpnd.getSubReg(); + if (SubReg && OtherSubReg && SubReg != OtherSubReg) + continue; + + // Check if VirtReg interferes with OtherReg after this COPY instruction. + if (!IsDef && VirtReg.liveAt(LIS->getInstructionIndex(Instr).getRegSlot())) + continue; + MCRegister OtherPhysReg = OtherReg.isPhysical() ? OtherReg.asMCReg() : VRM->getPhys(OtherReg); - if (OtherPhysReg == Hint) + MCRegister ThisHint = + SubReg ? 
TRI->getSubReg(Hint, SubReg) : MCRegister(Hint); + if (OtherPhysReg == ThisHint) Cost += MBFI->getBlockFreq(Instr.getParent()); } diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp index e3b6bfc03f467..7ac1aef83777a 100644 --- a/llvm/lib/CodeGen/RegisterCoalescer.cpp +++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp @@ -1475,10 +1475,7 @@ bool RegisterCoalescer::reMaterializeDef(const CoalescerPair &CP, // // The implicit-def of the super register may have been reduced to // subregisters depending on the uses. - - bool NewMIDefinesFullReg = false; - - SmallVector NewMIImplDefs; + SmallVector, 4> NewMIImplDefs; for (unsigned i = NewMI.getDesc().getNumOperands(), e = NewMI.getNumOperands(); i != e; ++i) { @@ -1486,9 +1483,6 @@ bool RegisterCoalescer::reMaterializeDef(const CoalescerPair &CP, if (MO.isReg() && MO.isDef()) { assert(MO.isImplicit()); if (MO.getReg().isPhysical()) { - if (MO.getReg() == DstReg) - NewMIDefinesFullReg = true; - assert(MO.isImplicit() && MO.getReg().isPhysical() && (MO.isDead() || (DefSubIdx && @@ -1496,7 +1490,7 @@ bool RegisterCoalescer::reMaterializeDef(const CoalescerPair &CP, MCRegister((unsigned)NewMI.getOperand(0).getReg())) || TRI->isSubRegisterEq(NewMI.getOperand(0).getReg(), MO.getReg()))))); - NewMIImplDefs.push_back(MO.getReg().asMCReg()); + NewMIImplDefs.push_back({i, MO.getReg()}); } else { assert(MO.getReg() == NewMI.getOperand(0).getReg()); @@ -1641,12 +1635,30 @@ bool RegisterCoalescer::reMaterializeDef(const CoalescerPair &CP, // been asked for. If so it must implicitly define the whole thing. assert(DstReg.isPhysical() && "Only expect virtual or physical registers in remat"); + + // When we're rematerializing into a not-quite-right register we already add + // the real definition as an implicit-def, but we should also be marking the + // "official" register as dead, since nothing else is going to use it as a + // result of this remat. Not doing this can affect pressure tracking. 
NewMI.getOperand(0).setIsDead(true); - if (!NewMIDefinesFullReg) { + bool HasDefMatchingCopy = false; + for (auto [OpIndex, Reg] : NewMIImplDefs) { + if (Reg != DstReg) + continue; + // Also, if CopyDstReg is a sub-register of DstReg (and it is defined), we + // must mark DstReg as dead since it is not going to used as a result of + // this remat. + if (DstReg != CopyDstReg) + NewMI.getOperand(OpIndex).setIsDead(true); + else + HasDefMatchingCopy = true; + } + + // If NewMI does not already have an implicit-def CopyDstReg add one now. + if (!HasDefMatchingCopy) NewMI.addOperand(MachineOperand::CreateReg( CopyDstReg, true /*IsDef*/, true /*IsImp*/, false /*IsKill*/)); - } // Record small dead def live-ranges for all the subregisters // of the destination register. @@ -1677,8 +1689,8 @@ bool RegisterCoalescer::reMaterializeDef(const CoalescerPair &CP, NewMI.addOperand(MO); SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI); - for (MCRegister Reg : NewMIImplDefs) { - for (MCRegUnit Unit : TRI->regunits(Reg)) + for (Register Reg : make_second_range(NewMIImplDefs)) { + for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) if (LiveRange *LR = LIS->getCachedRegUnit(Unit)) LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator()); } diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 7aa293af963e6..8fc7eabf90ea8 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -11161,8 +11161,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, APFloat FrexpMant = frexp(C->getValueAPF(), FrexpExp, APFloat::rmNearestTiesToEven); SDValue Result0 = getConstantFP(FrexpMant, DL, VTList.VTs[0]); - SDValue Result1 = - getConstant(FrexpMant.isFinite() ? FrexpExp : 0, DL, VTList.VTs[1]); + SDValue Result1 = getSignedConstant(FrexpMant.isFinite() ? 
FrexpExp : 0, + DL, VTList.VTs[1]); return getNode(ISD::MERGE_VALUES, DL, VTList, {Result0, Result1}, Flags); } diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 4145c8a54a6fe..cc503d324e74b 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -7492,7 +7492,6 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, // Pre-increment recursion depth for use in recursive calls. ++Depth; const SDNodeFlags Flags = Op->getFlags(); - const TargetOptions &Options = DAG.getTarget().Options; EVT VT = Op.getValueType(); unsigned Opcode = Op.getOpcode(); @@ -7572,7 +7571,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, return DAG.getBuildVector(VT, DL, Ops); } case ISD::FADD: { - if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) + if (!Flags.hasNoSignedZeros()) break; // After operation legalization, it might not be legal to create new FSUBs. @@ -7617,7 +7616,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, } case ISD::FSUB: { // We can't turn -(A-B) into B-A when we honor signed zeros. 
- if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) + if (!Flags.hasNoSignedZeros()) break; SDValue X = Op.getOperand(0), Y = Op.getOperand(1); @@ -7678,7 +7677,7 @@ SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, } case ISD::FMA: case ISD::FMAD: { - if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) + if (!Flags.hasNoSignedZeros()) break; SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2); @@ -8797,7 +8796,6 @@ SDValue TargetLowering::expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *Node, EVT VT = Node->getValueType(0); EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); bool IsMax = Opc == ISD::FMAXIMUMNUM; - const TargetOptions &Options = DAG.getTarget().Options; SDNodeFlags Flags = Node->getFlags(); unsigned NewOp = @@ -8839,7 +8837,9 @@ SDValue TargetLowering::expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *Node, return DAG.getNode(IEEE2008Op, DL, VT, LHS, RHS, Flags); } - if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) + if (VT.isVector() && + (isOperationLegalOrCustomOrPromote(Opc, VT.getVectorElementType()) || + !isOperationLegalOrCustom(ISD::VSELECT, VT))) return DAG.UnrollVectorOp(Node); // If only one operand is NaN, override it with another operand. @@ -8856,8 +8856,8 @@ SDValue TargetLowering::expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *Node, // TODO: We need quiet sNaN if strictfp. // Fixup signed zero behavior. 
- if (Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros() || - DAG.isKnownNeverZeroFloat(LHS) || DAG.isKnownNeverZeroFloat(RHS)) { + if (Flags.hasNoSignedZeros() || DAG.isKnownNeverZeroFloat(LHS) || + DAG.isKnownNeverZeroFloat(RHS)) { return MinMax; } SDValue TestZero = diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp index 2e473c6c4e97f..c9e46182decc2 100644 --- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp +++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp @@ -67,7 +67,8 @@ bool TargetRegisterInfo::shouldRegionSplitForVirtReg( const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); const MachineRegisterInfo &MRI = MF.getRegInfo(); MachineInstr *MI = MRI.getUniqueVRegDef(VirtReg.reg()); - if (MI && TII->isReMaterializable(*MI) && VirtReg.size() > HugeSizeForSplit) + if (MI && TII->isTriviallyReMaterializable(*MI) && + VirtReg.size() > HugeSizeForSplit) return false; return true; } diff --git a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp index f29f2c7602fc6..5785505ce2b0c 100644 --- a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp +++ b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp @@ -27,6 +27,11 @@ namespace rootsig { char GenericRSMetadataError::ID; char InvalidRSMetadataFormat::ID; char InvalidRSMetadataValue::ID; +char TableSamplerMixinError::ID; +char ShaderRegisterOverflowError::ID; +char OffsetOverflowError::ID; +char OffsetAppendAfterOverflow::ID; + template char RootSignatureValidationError::ID; static std::optional extractMdIntValue(MDNode *Node, @@ -55,8 +60,9 @@ static std::optional extractMdStringValue(MDNode *Node, template && std::is_same_v, uint32_t>>> -Expected extractEnumValue(MDNode *Node, unsigned int OpId, StringRef ErrText, - llvm::function_ref VerifyFn) { +static Expected +extractEnumValue(MDNode *Node, unsigned int OpId, StringRef ErrText, + llvm::function_ref VerifyFn) { if (std::optional Val = extractMdIntValue(Node, OpId)) { if 
(!VerifyFn(*Val)) return make_error>(ErrText, *Val); @@ -538,6 +544,60 @@ Error MetadataParser::parseRootSignatureElement(mcdxbc::RootSignatureDesc &RSD, llvm_unreachable("Unhandled RootSignatureElementKind enum."); } +static Error +validateDescriptorTableSamplerMixin(const mcdxbc::DescriptorTable &Table, + uint32_t Location) { + dxil::ResourceClass CurrRC = dxil::ResourceClass::Sampler; + for (const mcdxbc::DescriptorRange &Range : Table.Ranges) { + if (Range.RangeType == dxil::ResourceClass::Sampler && + CurrRC != dxil::ResourceClass::Sampler) + return make_error(CurrRC, Location); + CurrRC = Range.RangeType; + } + return Error::success(); +} + +static Error +validateDescriptorTableRegisterOverflow(const mcdxbc::DescriptorTable &Table, + uint32_t Location) { + uint64_t Offset = 0; + bool IsPrevUnbound = false; + for (const mcdxbc::DescriptorRange &Range : Table.Ranges) { + // Validation of NumDescriptors should have happened by this point. + if (Range.NumDescriptors == 0) + continue; + + const uint64_t RangeBound = llvm::hlsl::rootsig::computeRangeBound( + Range.BaseShaderRegister, Range.NumDescriptors); + + if (!verifyNoOverflowedOffset(RangeBound)) + return make_error( + Range.RangeType, Range.BaseShaderRegister, Range.RegisterSpace); + + bool IsAppending = + Range.OffsetInDescriptorsFromTableStart == DescriptorTableOffsetAppend; + if (!IsAppending) + Offset = Range.OffsetInDescriptorsFromTableStart; + + if (IsPrevUnbound && IsAppending) + return make_error( + Range.RangeType, Range.BaseShaderRegister, Range.RegisterSpace); + + const uint64_t OffsetBound = + llvm::hlsl::rootsig::computeRangeBound(Offset, Range.NumDescriptors); + + if (!verifyNoOverflowedOffset(OffsetBound)) + return make_error( + Range.RangeType, Range.BaseShaderRegister, Range.RegisterSpace); + + Offset = OffsetBound + 1; + IsPrevUnbound = + Range.NumDescriptors == llvm::hlsl::rootsig::NumDescriptorsUnbounded; + } + + return Error::success(); +} + Error MetadataParser::validateRootSignature( 
const mcdxbc::RootSignatureDesc &RSD) { Error DeferredErrs = Error::success(); @@ -611,6 +671,14 @@ Error MetadataParser::validateRootSignature( joinErrors(std::move(DeferredErrs), make_error>( "DescriptorFlag", Range.Flags)); + + if (Error Err = + validateDescriptorTableSamplerMixin(Table, Info.Location)) + DeferredErrs = joinErrors(std::move(DeferredErrs), std::move(Err)); + + if (Error Err = + validateDescriptorTableRegisterOverflow(Table, Info.Location)) + DeferredErrs = joinErrors(std::move(DeferredErrs), std::move(Err)); } break; } @@ -651,6 +719,12 @@ Error MetadataParser::validateRootSignature( joinErrors(std::move(DeferredErrs), make_error>( "RegisterSpace", Sampler.RegisterSpace)); + + if (!hlsl::rootsig::verifyStaticSamplerFlags(RSD.Version, Sampler.Flags)) + DeferredErrs = + joinErrors(std::move(DeferredErrs), + make_error>( + "Static Sampler Flag", Sampler.Flags)); } return DeferredErrs; diff --git a/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp b/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp index c2c3bf6d1b8dc..2c78d622f7f28 100644 --- a/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp +++ b/llvm/lib/Frontend/HLSL/RootSignatureValidations.cpp @@ -20,7 +20,9 @@ namespace rootsig { bool verifyRootFlag(uint32_t Flags) { return (Flags & ~0xfff) == 0; } -bool verifyVersion(uint32_t Version) { return (Version == 1 || Version == 2); } +bool verifyVersion(uint32_t Version) { + return (Version == 1 || Version == 2 || Version == 3); +} bool verifyRegisterValue(uint32_t RegisterValue) { return RegisterValue != ~0U; @@ -111,6 +113,25 @@ bool verifyDescriptorRangeFlag(uint32_t Version, dxil::ResourceClass Type, return (Flags & ~Mask) == FlagT::None; } +bool verifyStaticSamplerFlags(uint32_t Version, uint32_t FlagsNumber) { + uint32_t LargestValue = llvm::to_underlying( + dxbc::StaticSamplerFlags::LLVM_BITMASK_LARGEST_ENUMERATOR); + if (FlagsNumber >= NextPowerOf2(LargestValue)) + return false; + + dxbc::StaticSamplerFlags Flags = 
dxbc::StaticSamplerFlags(FlagsNumber); + if (Version <= 2) + return Flags == dxbc::StaticSamplerFlags::None; + + assert(Version == 3 && "Provided invalid root signature version"); + + dxbc::StaticSamplerFlags Mask = + dxbc::StaticSamplerFlags::NonNormalizedCoordinates | + dxbc::StaticSamplerFlags::UintBorderColor | + dxbc::StaticSamplerFlags::None; + return (Flags | Mask) == Mask; +} + bool verifyNumDescriptors(uint32_t NumDescriptors) { return NumDescriptors > 0; } @@ -136,7 +157,6 @@ uint64_t computeRangeBound(uint64_t Offset, uint32_t Size) { return Offset + uint64_t(Size) - 1; } - } // namespace rootsig } // namespace hlsl } // namespace llvm diff --git a/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp b/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp index cfddc06fbc00b..c4aa2c7638450 100644 --- a/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp +++ b/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp @@ -8,19 +8,32 @@ #include "llvm/Frontend/Offloading/OffloadWrapper.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/Twine.h" #include "llvm/BinaryFormat/Magic.h" #include "llvm/Frontend/Offloading/Utility.h" #include "llvm/IR/Constants.h" +#include "llvm/IR/DerivedTypes.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" #include "llvm/Object/OffloadBinary.h" #include "llvm/Support/Error.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/LineIterator.h" +#include "llvm/Support/MemoryBufferRef.h" #include "llvm/TargetParser/Triple.h" #include "llvm/Transforms/Utils/ModuleUtils.h" +#include +#include +#include + using namespace llvm; +using namespace llvm::object; using namespace llvm::offloading; namespace { @@ -620,6 +633,384 @@ void createRegisterFatbinFunction(Module &M, GlobalVariable *FatbinDesc, // Add this function to constructors. 
appendToGlobalCtors(M, CtorFunc, /*Priority=*/101); } + +/// SYCLWrapper helper class that creates all LLVM IRs wrapping given images. +struct SYCLWrapper { + Module &M; + LLVMContext &C; + SYCLJITOptions Options; + + StructType *EntryTy = nullptr; + StructType *SyclDeviceImageTy = nullptr; + StructType *SyclBinDescTy = nullptr; + + SYCLWrapper(Module &M, const SYCLJITOptions &Options) + : M(M), C(M.getContext()), Options(Options) { + EntryTy = offloading::getEntryTy(M); + SyclDeviceImageTy = getSyclDeviceImageTy(); + SyclBinDescTy = getSyclBinDescTy(); + } + + IntegerType *getSizeTTy() { + switch (M.getDataLayout().getPointerSize()) { + case 4: + return Type::getInt32Ty(C); + case 8: + return Type::getInt64Ty(C); + } + llvm_unreachable("unsupported pointer type size"); + } + + SmallVector getSizetConstPair(size_t First, size_t Second) { + IntegerType *SizeTTy = getSizeTTy(); + return SmallVector{ConstantInt::get(SizeTTy, First), + ConstantInt::get(SizeTTy, Second)}; + } + + /// Note: Properties aren't supported and the support is going + /// to be added later. + /// Creates a structure corresponding to: + /// SYCL specific image descriptor type. + /// \code + /// struct __sycl.tgt_device_image { + /// // version of this structure - for backward compatibility; + /// // all modifications which change order/type/offsets of existing fields + /// // should increment the version. + /// uint16_t Version; + /// // the kind of offload model the image employs. 
+ /// uint8_t OffloadKind; + /// // format of the image data - SPIRV, LLVMIR bitcode, etc + /// uint8_t Format; + /// // null-terminated string representation of the device's target + /// // architecture + /// const char *Arch; + /// // a null-terminated string; target- and compiler-specific options + /// // which are suggested to use to "compile" program at runtime + /// const char *CompileOptions; + /// // a null-terminated string; target- and compiler-specific options + /// // which are suggested to use to "link" program at runtime + /// const char *LinkOptions; + /// // Pointer to the device binary image start + /// void *ImageStart; + /// // Pointer to the device binary image end + /// void *ImageEnd; + /// // the entry table + /// __tgt_offload_entry *EntriesBegin; + /// __tgt_offload_entry *EntriesEnd; + /// const char *PropertiesBegin; + /// const char *PropertiesEnd; + /// }; + /// \endcode + StructType *getSyclDeviceImageTy() { + return StructType::create( + { + Type::getInt16Ty(C), // Version + Type::getInt8Ty(C), // OffloadKind + Type::getInt8Ty(C), // Format + PointerType::getUnqual(C), // Arch + PointerType::getUnqual(C), // CompileOptions + PointerType::getUnqual(C), // LinkOptions + PointerType::getUnqual(C), // ImageStart + PointerType::getUnqual(C), // ImageEnd + PointerType::getUnqual(C), // EntriesBegin + PointerType::getUnqual(C), // EntriesEnd + PointerType::getUnqual(C), // PropertiesBegin + PointerType::getUnqual(C) // PropertiesEnd + }, + "__sycl.tgt_device_image"); + } + + /// Creates a structure for SYCL specific binary descriptor type. Corresponds + /// to: + /// + /// \code + /// struct __sycl.tgt_bin_desc { + /// // version of this structure - for backward compatibility; + /// // all modifications which change order/type/offsets of existing fields + /// // should increment the version. 
+ /// uint16_t Version; + /// uint16_t NumDeviceImages; + /// __sycl.tgt_device_image *DeviceImages; + /// // the offload entry table + /// __tgt_offload_entry *HostEntriesBegin; + /// __tgt_offload_entry *HostEntriesEnd; + /// }; + /// \endcode + StructType *getSyclBinDescTy() { + return StructType::create( + {Type::getInt16Ty(C), Type::getInt16Ty(C), PointerType::getUnqual(C), + PointerType::getUnqual(C), PointerType::getUnqual(C)}, + "__sycl.tgt_bin_desc"); + } + + /// Adds a global readonly variable that is initialized by given + /// \p Initializer to the module. + GlobalVariable *addGlobalArrayVariable(const Twine &Name, + ArrayRef Initializer, + const Twine &Section = "") { + auto *Arr = ConstantDataArray::get(M.getContext(), Initializer); + auto *Var = new GlobalVariable(M, Arr->getType(), /*isConstant*/ true, + GlobalVariable::InternalLinkage, Arr, Name); + Var->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); + + SmallVector NameBuf; + auto SectionName = Section.toStringRef(NameBuf); + if (!SectionName.empty()) + Var->setSection(SectionName); + return Var; + } + + /// Adds given \p Buf as a global variable into the module. + /// \returns Pair of pointers that point at the beginning and the end of the + /// variable. + std::pair + addArrayToModule(ArrayRef Buf, const Twine &Name, + const Twine &Section = "") { + auto *Var = addGlobalArrayVariable(Name, Buf, Section); + auto *ImageB = ConstantExpr::getGetElementPtr(Var->getValueType(), Var, + getSizetConstPair(0, 0)); + auto *ImageE = ConstantExpr::getGetElementPtr( + Var->getValueType(), Var, getSizetConstPair(0, Buf.size())); + return std::make_pair(ImageB, ImageE); + } + + /// Adds given \p Data as constant byte array in the module. + /// \returns Constant pointer to the added data. The pointer type does not + /// carry size information. 
+ Constant *addRawDataToModule(ArrayRef Data, const Twine &Name) { + auto *Var = addGlobalArrayVariable(Name, Data); + auto *DataPtr = ConstantExpr::getGetElementPtr(Var->getValueType(), Var, + getSizetConstPair(0, 0)); + return DataPtr; + } + + /// Creates a global variable of const char* type and creates an + /// initializer that initializes it with \p Str. + /// + /// \returns Link-time constant pointer (constant expr) to that + /// variable. + Constant *addStringToModule(StringRef Str, const Twine &Name) { + auto *Arr = ConstantDataArray::getString(C, Str); + auto *Var = new GlobalVariable(M, Arr->getType(), /*isConstant*/ true, + GlobalVariable::InternalLinkage, Arr, Name); + Var->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); + auto *Zero = ConstantInt::get(getSizeTTy(), 0); + Constant *ZeroZero[] = {Zero, Zero}; + return ConstantExpr::getGetElementPtr(Var->getValueType(), Var, ZeroZero); + } + + /// Each image contains its own set of symbols, which may contain different + /// symbols than other images. This function constructs an array of + /// symbol entries for a particular image. + /// + /// \returns Pointers to the beginning and end of the array. 
+ std::pair + initOffloadEntriesPerImage(StringRef Entries, const Twine &OffloadKindTag) { + SmallVector EntriesInits; + std::unique_ptr MB = MemoryBuffer::getMemBuffer( + Entries, /*BufferName*/ "", /*RequiresNullTerminator*/ false); + for (line_iterator LI(*MB); !LI.is_at_eof(); ++LI) { + GlobalVariable *GV = + emitOffloadingEntry(M, /*Kind*/ OffloadKind::OFK_SYCL, + Constant::getNullValue(PointerType::getUnqual(C)), + /*Name*/ *LI, /*Size*/ 0, + /*Flags*/ 0, /*Data*/ 0); + EntriesInits.push_back(GV->getInitializer()); + } + + auto *Arr = ConstantArray::get(ArrayType::get(EntryTy, EntriesInits.size()), + EntriesInits); + auto *EntriesGV = new GlobalVariable(M, Arr->getType(), /*isConstant*/ true, + GlobalVariable::InternalLinkage, Arr, + OffloadKindTag + "entries_arr"); + + auto *EntriesB = ConstantExpr::getGetElementPtr( + EntriesGV->getValueType(), EntriesGV, getSizetConstPair(0, 0)); + auto *EntriesE = ConstantExpr::getGetElementPtr( + EntriesGV->getValueType(), EntriesGV, + getSizetConstPair(0, EntriesInits.size())); + return std::make_pair(EntriesB, EntriesE); + } + + Constant *wrapImage(const OffloadBinary &OB, const Twine &ImageID, + StringRef OffloadKindTag) { + // Note: Intel DPC++ compiler had 2 versions of this structure + // and clang++ has a third different structure. To avoid ABI incompatibility + // between generated device images the Version here starts from 3. + constexpr uint16_t DeviceImageStructVersion = 3; + Constant *Version = + ConstantInt::get(Type::getInt16Ty(C), DeviceImageStructVersion); + Constant *OffloadKindConstant = ConstantInt::get( + Type::getInt8Ty(C), static_cast(OB.getOffloadKind())); + Constant *ImageKindConstant = ConstantInt::get( + Type::getInt8Ty(C), static_cast(OB.getImageKind())); + StringRef Triple = OB.getString("triple"); + Constant *TripleConstant = + addStringToModule(Triple, Twine(OffloadKindTag) + "target." 
+ ImageID); + Constant *CompileOptions = + addStringToModule(Options.CompileOptions, + Twine(OffloadKindTag) + "opts.compile." + ImageID); + Constant *LinkOptions = addStringToModule( + Options.LinkOptions, Twine(OffloadKindTag) + "opts.link." + ImageID); + + // Note: NULL for now. + std::pair PropertiesConstants = { + Constant::getNullValue(PointerType::getUnqual(C)), + Constant::getNullValue(PointerType::getUnqual(C))}; + + StringRef RawImage = OB.getImage(); + std::pair Binary = addArrayToModule( + ArrayRef(RawImage.begin(), RawImage.end()), + Twine(OffloadKindTag) + ImageID + ".data", ".llvm.offloading"); + + // For SYCL images offload entries are defined here per image. + std::pair ImageEntriesPtrs = + initOffloadEntriesPerImage(OB.getString("symbols"), OffloadKindTag); + Constant *WrappedBinary = ConstantStruct::get( + SyclDeviceImageTy, Version, OffloadKindConstant, ImageKindConstant, + TripleConstant, CompileOptions, LinkOptions, Binary.first, + Binary.second, ImageEntriesPtrs.first, ImageEntriesPtrs.second, + PropertiesConstants.first, PropertiesConstants.second); + + return WrappedBinary; + } + + GlobalVariable *combineWrappedImages(ArrayRef WrappedImages, + StringRef OffloadKindTag) { + auto *ImagesData = ConstantArray::get( + ArrayType::get(SyclDeviceImageTy, WrappedImages.size()), WrappedImages); + auto *ImagesGV = + new GlobalVariable(M, ImagesData->getType(), /*isConstant*/ true, + GlobalValue::InternalLinkage, ImagesData, + Twine(OffloadKindTag) + "device_images"); + ImagesGV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); + + auto *Zero = ConstantInt::get(getSizeTTy(), 0); + Constant *ZeroZero[] = {Zero, Zero}; + auto *ImagesB = ConstantExpr::getGetElementPtr(ImagesGV->getValueType(), + ImagesGV, ZeroZero); + + Constant *EntriesB = Constant::getNullValue(PointerType::getUnqual(C)); + Constant *EntriesE = Constant::getNullValue(PointerType::getUnqual(C)); + static constexpr uint16_t BinDescStructVersion = 1; + auto *DescInit = 
ConstantStruct::get( + SyclBinDescTy, + ConstantInt::get(Type::getInt16Ty(C), BinDescStructVersion), + ConstantInt::get(Type::getInt16Ty(C), WrappedImages.size()), ImagesB, + EntriesB, EntriesE); + + return new GlobalVariable(M, DescInit->getType(), /*isConstant*/ true, + GlobalValue::InternalLinkage, DescInit, + Twine(OffloadKindTag) + "descriptor"); + } + + /// Creates binary descriptor for the given device images. Binary descriptor + /// is an object that is passed to the offloading runtime at program startup + /// and it describes all device images available in the executable or shared + /// library. It is defined as follows: + /// + /// \code + /// __attribute__((visibility("hidden"))) + /// __tgt_offload_entry *__sycl_offload_entries_arr0[]; + /// ... + /// __attribute__((visibility("hidden"))) + /// __tgt_offload_entry *__sycl_offload_entries_arrN[]; + /// + /// __attribute__((visibility("hidden"))) + /// extern const char *CompileOptions = "..."; + /// ... + /// __attribute__((visibility("hidden"))) + /// extern const char *LinkOptions = "..."; + /// ... + /// + /// static const char Image0[] = { ... }; + /// ... + /// static const char ImageN[] = { ... }; + /// + /// static const __sycl.tgt_device_image Images[] = { + /// { + /// Version, // Version + /// OffloadKind, // OffloadKind + /// Format, // Format of the image. + // TripleString, // Arch + /// CompileOptions, // CompileOptions + /// LinkOptions, // LinkOptions + /// Image0, // ImageStart + /// Image0 + IMAGE0_SIZE, // ImageEnd + /// __sycl_offload_entries_arr0, // EntriesBegin + /// __sycl_offload_entries_arr0 + ENTRIES0_SIZE, // EntriesEnd + /// NULL, // PropertiesBegin + /// NULL, // PropertiesEnd + /// }, + /// ... 
+ /// }; + /// + /// static const __sycl.tgt_bin_desc FatbinDesc = { + /// Version, //Version + /// sizeof(Images) / sizeof(Images[0]), //NumDeviceImages + /// Images, //DeviceImages + /// NULL, //HostEntriesBegin + /// NULL //HostEntriesEnd + /// }; + /// \endcode + /// + /// \returns Global variable that represents FatbinDesc. + GlobalVariable *createFatbinDesc(ArrayRef OffloadFiles) { + StringRef OffloadKindTag = ".sycl_offloading."; + SmallVector WrappedImages; + WrappedImages.reserve(OffloadFiles.size()); + for (size_t I = 0, E = OffloadFiles.size(); I != E; ++I) + WrappedImages.push_back( + wrapImage(*OffloadFiles[I].getBinary(), Twine(I), OffloadKindTag)); + + return combineWrappedImages(WrappedImages, OffloadKindTag); + } + + void createRegisterFatbinFunction(GlobalVariable *FatbinDesc) { + auto *FuncTy = FunctionType::get(Type::getVoidTy(C), /*isVarArg*/ false); + auto *Func = Function::Create(FuncTy, GlobalValue::InternalLinkage, + Twine("sycl") + ".descriptor_reg", &M); + Func->setSection(".text.startup"); + + // Get RegFuncName function declaration. + auto *RegFuncTy = + FunctionType::get(Type::getVoidTy(C), PointerType::getUnqual(C), + /*isVarArg=*/false); + FunctionCallee RegFuncC = + M.getOrInsertFunction("__sycl_register_lib", RegFuncTy); + + // Construct function body + IRBuilder Builder(BasicBlock::Create(C, "entry", Func)); + Builder.CreateCall(RegFuncC, FatbinDesc); + Builder.CreateRetVoid(); + + // Add this function to constructors. + appendToGlobalCtors(M, Func, /*Priority*/ 1); + } + + void createUnregisterFunction(GlobalVariable *FatbinDesc) { + auto *FuncTy = FunctionType::get(Type::getVoidTy(C), /*isVarArg*/ false); + auto *Func = Function::Create(FuncTy, GlobalValue::InternalLinkage, + "sycl.descriptor_unreg", &M); + Func->setSection(".text.startup"); + + // Get UnregFuncName function declaration. 
+ auto *UnRegFuncTy = + FunctionType::get(Type::getVoidTy(C), PointerType::getUnqual(C), + /*isVarArg=*/false); + FunctionCallee UnRegFuncC = + M.getOrInsertFunction("__sycl_unregister_lib", UnRegFuncTy); + + // Construct function body + IRBuilder<> Builder(BasicBlock::Create(C, "entry", Func)); + Builder.CreateCall(UnRegFuncC, FatbinDesc); + Builder.CreateRetVoid(); + + // Add this function to global destructors. + appendToGlobalDtors(M, Func, /*Priority*/ 1); + } +}; // end of SYCLWrapper + } // namespace Error offloading::wrapOpenMPBinaries(Module &M, ArrayRef> Images, @@ -660,3 +1051,22 @@ Error offloading::wrapHIPBinary(Module &M, ArrayRef Image, EmitSurfacesAndTextures); return Error::success(); } + +Error llvm::offloading::wrapSYCLBinaries(llvm::Module &M, ArrayRef Buffer, + SYCLJITOptions Options) { + SYCLWrapper W(M, Options); + MemoryBufferRef MBR(StringRef(Buffer.begin(), Buffer.size()), + /*Identifier*/ ""); + SmallVector OffloadFiles; + if (Error E = extractOffloadBinaries(MBR, OffloadFiles)) + return E; + + GlobalVariable *Desc = W.createFatbinDesc(OffloadFiles); + if (!Desc) + return createStringError(inconvertibleErrorCode(), + "No binary descriptors created."); + + W.createRegisterFatbinFunction(Desc); + W.createUnregisterFunction(Desc); + return Error::success(); +} diff --git a/llvm/lib/Frontend/Offloading/Utility.cpp b/llvm/lib/Frontend/Offloading/Utility.cpp index 4b75307ca0b6c..5000488a52f37 100644 --- a/llvm/lib/Frontend/Offloading/Utility.cpp +++ b/llvm/lib/Frontend/Offloading/Utility.cpp @@ -82,11 +82,11 @@ offloading::getOffloadingEntryInitializer(Module &M, object::OffloadKind Kind, return {EntryInitializer, Str}; } -void offloading::emitOffloadingEntry(Module &M, object::OffloadKind Kind, - Constant *Addr, StringRef Name, - uint64_t Size, uint32_t Flags, - uint64_t Data, Constant *AuxAddr, - StringRef SectionName) { +GlobalVariable * +offloading::emitOffloadingEntry(Module &M, object::OffloadKind Kind, + Constant *Addr, StringRef Name, 
uint64_t Size, + uint32_t Flags, uint64_t Data, + Constant *AuxAddr, StringRef SectionName) { const llvm::Triple &Triple = M.getTargetTriple(); auto [EntryInitializer, NameGV] = getOffloadingEntryInitializer( @@ -106,6 +106,7 @@ void offloading::emitOffloadingEntry(Module &M, object::OffloadKind Kind, else Entry->setSection(SectionName); Entry->setAlignment(Align(object::OffloadBinary::getAlignment())); + return Entry; } std::pair diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp index 9b67465faab0b..5980ee35a5cd2 100644 --- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp +++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp @@ -4979,7 +4979,7 @@ static void createTargetLoopWorkshareCall(OpenMPIRBuilder *OMPBuilder, WorksharingLoopType LoopType, BasicBlock *InsertBlock, Value *Ident, Value *LoopBodyArg, Value *TripCount, - Function &LoopBodyFn) { + Function &LoopBodyFn, bool NoLoop) { Type *TripCountTy = TripCount->getType(); Module &M = OMPBuilder->M; IRBuilder<> &Builder = OMPBuilder->Builder; @@ -5007,8 +5007,10 @@ static void createTargetLoopWorkshareCall(OpenMPIRBuilder *OMPBuilder, RealArgs.push_back(ConstantInt::get(TripCountTy, 0)); if (LoopType == WorksharingLoopType::DistributeForStaticLoop) { RealArgs.push_back(ConstantInt::get(TripCountTy, 0)); + RealArgs.push_back(ConstantInt::get(Builder.getInt8Ty(), NoLoop)); + } else { + RealArgs.push_back(ConstantInt::get(Builder.getInt8Ty(), 0)); } - RealArgs.push_back(ConstantInt::get(Builder.getInt8Ty(), 0)); Builder.CreateCall(RTLFn, RealArgs); } @@ -5016,7 +5018,7 @@ static void createTargetLoopWorkshareCall(OpenMPIRBuilder *OMPBuilder, static void workshareLoopTargetCallback( OpenMPIRBuilder *OMPIRBuilder, CanonicalLoopInfo *CLI, Value *Ident, Function &OutlinedFn, const SmallVector &ToBeDeleted, - WorksharingLoopType LoopType) { + WorksharingLoopType LoopType, bool NoLoop) { IRBuilder<> &Builder = OMPIRBuilder->Builder; BasicBlock *Preheader = CLI->getPreheader(); Value 
*TripCount = CLI->getTripCount(); @@ -5063,17 +5065,16 @@ static void workshareLoopTargetCallback( OutlinedFnCallInstruction->eraseFromParent(); createTargetLoopWorkshareCall(OMPIRBuilder, LoopType, Preheader, Ident, - LoopBodyArg, TripCount, OutlinedFn); + LoopBodyArg, TripCount, OutlinedFn, NoLoop); for (auto &ToBeDeletedItem : ToBeDeleted) ToBeDeletedItem->eraseFromParent(); CLI->invalidate(); } -OpenMPIRBuilder::InsertPointTy -OpenMPIRBuilder::applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI, - InsertPointTy AllocaIP, - WorksharingLoopType LoopType) { +OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyWorkshareLoopTarget( + DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, + WorksharingLoopType LoopType, bool NoLoop) { uint32_t SrcLocStrSize; Constant *SrcLocStr = getOrCreateSrcLocStr(DL, SrcLocStrSize); Value *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize); @@ -5156,7 +5157,7 @@ OpenMPIRBuilder::applyWorkshareLoopTarget(DebugLoc DL, CanonicalLoopInfo *CLI, OI.PostOutlineCB = [=, ToBeDeletedVec = std::move(ToBeDeleted)](Function &OutlinedFn) { workshareLoopTargetCallback(this, CLI, Ident, OutlinedFn, ToBeDeletedVec, - LoopType); + LoopType, NoLoop); }; addOutlineInfo(std::move(OI)); return CLI->getAfterIP(); @@ -5167,9 +5168,9 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::applyWorkshareLoop( bool NeedsBarrier, omp::ScheduleKind SchedKind, Value *ChunkSize, bool HasSimdModifier, bool HasMonotonicModifier, bool HasNonmonotonicModifier, bool HasOrderedClause, - WorksharingLoopType LoopType) { + WorksharingLoopType LoopType, bool NoLoop) { if (Config.isTargetDevice()) - return applyWorkshareLoopTarget(DL, CLI, AllocaIP, LoopType); + return applyWorkshareLoopTarget(DL, CLI, AllocaIP, LoopType, NoLoop); OMPScheduleType EffectiveScheduleType = computeOpenMPScheduleType( SchedKind, ChunkSize, HasSimdModifier, HasMonotonicModifier, HasNonmonotonicModifier, HasOrderedClause); @@ -10312,17 +10313,19 @@ void 
OffloadEntriesInfoManager::getTargetRegionEntryFnName( TargetRegionEntryInfo OpenMPIRBuilder::getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack, + vfs::FileSystem &VFS, StringRef ParentName) { sys::fs::UniqueID ID(0xdeadf17e, 0); auto FileIDInfo = CallBack(); uint64_t FileID = 0; - std::error_code EC = sys::fs::getUniqueID(std::get<0>(FileIDInfo), ID); - // If the inode ID could not be determined, create a hash value - // the current file name and use that as an ID. - if (EC) + if (ErrorOr Status = VFS.status(std::get<0>(FileIDInfo))) { + ID = Status->getUniqueID(); + FileID = Status->getUniqueID().getFile(); + } else { + // If the inode ID could not be determined, create a hash value + // the current file name and use that as an ID. FileID = hash_value(std::get<0>(FileIDInfo)); - else - FileID = ID.getFile(); + } return TargetRegionEntryInfo(ParentName, ID.getDevice(), FileID, std::get<1>(FileIDInfo)); diff --git a/llvm/lib/IR/Intrinsics.cpp b/llvm/lib/IR/Intrinsics.cpp index 4d2e8fadff4f7..6797a100ff732 100644 --- a/llvm/lib/IR/Intrinsics.cpp +++ b/llvm/lib/IR/Intrinsics.cpp @@ -207,7 +207,6 @@ DecodeIITType(unsigned &NextElt, ArrayRef Infos, bool IsScalableVector = (LastInfo == IIT_SCALABLE_VEC); IIT_Info Info = IIT_Info(Infos[NextElt++]); - unsigned StructElts = 2; switch (Info) { case IIT_Done: @@ -390,28 +389,9 @@ DecodeIITType(unsigned &NextElt, ArrayRef Infos, case IIT_EMPTYSTRUCT: OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0)); return; - case IIT_STRUCT9: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT8: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT7: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT6: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT5: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT4: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT3: - ++StructElts; - [[fallthrough]]; - case IIT_STRUCT2: { + case IIT_STRUCT: { + unsigned StructElts = Infos[NextElt++] + 2; + 
OutputTable.push_back( IITDescriptor::get(IITDescriptor::Struct, StructElts)); diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index b2e76cc7a8a90..8c03d6f809d50 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -5869,9 +5869,7 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) { break; } case Intrinsic::call_preallocated_setup: { - auto *NumArgs = dyn_cast(Call.getArgOperand(0)); - Check(NumArgs != nullptr, - "llvm.call.preallocated.setup argument must be a constant"); + auto *NumArgs = cast(Call.getArgOperand(0)); bool FoundCall = false; for (User *U : Call.users()) { auto *UseCall = dyn_cast(U); diff --git a/llvm/lib/MC/DXContainerRootSignature.cpp b/llvm/lib/MC/DXContainerRootSignature.cpp index b9ebb7a9e789c..713aa3d8143e8 100644 --- a/llvm/lib/MC/DXContainerRootSignature.cpp +++ b/llvm/lib/MC/DXContainerRootSignature.cpp @@ -23,19 +23,20 @@ static uint32_t writePlaceholder(raw_svector_ostream &Stream) { static uint32_t rewriteOffsetToCurrentByte(raw_svector_ostream &Stream, uint32_t Offset) { uint32_t ByteOffset = Stream.tell(); - uint32_t Value = - support::endian::byte_swap( - ByteOffset); + uint32_t Value = support::endian::byte_swap( + ByteOffset, llvm::endianness::little); Stream.pwrite(reinterpret_cast(&Value), sizeof(Value), Offset); return ByteOffset; } size_t RootSignatureDesc::getSize() const { uint32_t StaticSamplersOffset = computeStaticSamplersOffset(); - size_t StaticSamplersSize = - StaticSamplers.size() * sizeof(dxbc::RTS0::v1::StaticSampler); + size_t StaticSamplersSize = sizeof(dxbc::RTS0::v1::StaticSampler); + if (Version > 2) + StaticSamplersSize = sizeof(dxbc::RTS0::v3::StaticSampler); - return size_t(StaticSamplersOffset) + StaticSamplersSize; + return size_t(StaticSamplersOffset) + + (StaticSamplersSize * StaticSamplers.size()); } uint32_t RootSignatureDesc::computeRootParametersOffset() const { @@ -171,6 +172,9 @@ void RootSignatureDesc::write(raw_ostream &OS) const { 
support::endian::write(BOS, S.ShaderRegister, llvm::endianness::little); support::endian::write(BOS, S.RegisterSpace, llvm::endianness::little); support::endian::write(BOS, S.ShaderVisibility, llvm::endianness::little); + + if (Version > 2) + support::endian::write(BOS, S.Flags, llvm::endianness::little); } assert(Storage.size() == getSize()); OS.write(Storage.data(), Storage.size()); diff --git a/llvm/lib/MC/MCSFrame.cpp b/llvm/lib/MC/MCSFrame.cpp index 910fcab7b4d75..d6fa54c087ca3 100644 --- a/llvm/lib/MC/MCSFrame.cpp +++ b/llvm/lib/MC/MCSFrame.cpp @@ -200,7 +200,7 @@ class SFrameEmitterImpl { return false; } - bool setCFAOffset(SFrameFRE &FRE, const SMLoc &Loc, size_t Offset) { + bool setCFAOffset(SFrameFRE &FRE, SMLoc Loc, size_t Offset) { if (!FRE.CFARegSet) { Streamer.getContext().reportWarning( Loc, "adjusting CFA offset without a base register. " diff --git a/llvm/lib/Object/ArchiveWriter.cpp b/llvm/lib/Object/ArchiveWriter.cpp index 6fc0889afc6a8..a11259748b9cc 100644 --- a/llvm/lib/Object/ArchiveWriter.cpp +++ b/llvm/lib/Object/ArchiveWriter.cpp @@ -1119,10 +1119,26 @@ Error writeArchiveToStream(raw_ostream &Out, // to switch to 64-bit. Note that the file can be larger than 4GB as long as // the last member starts before the 4GB offset. if (*HeadersSize + LastMemberHeaderOffset >= Sym64Threshold) { - if (Kind == object::Archive::K_DARWIN) + switch (Kind) { + case object::Archive::K_COFF: + // COFF format has no 64-bit version, so we use GNU64 instead. + if (!SymMap.Map.empty() && !SymMap.ECMap.empty()) + // Only the COFF format supports the ECSYMBOLS section, so don’t use + // GNU64 when two symbol maps are required. + return make_error( + "Archive is too large: ARM64X does not support archives larger " + "than 4GB"); + // Since this changes the headers, we need to recalculate everything. 
+ return writeArchiveToStream(Out, NewMembers, WriteSymtab, + object::Archive::K_GNU64, Deterministic, + Thin, IsEC, Warn); + case object::Archive::K_DARWIN: Kind = object::Archive::K_DARWIN64; - else + break; + default: Kind = object::Archive::K_GNU64; + break; + } HeadersSize.reset(); } } diff --git a/llvm/lib/Object/DXContainer.cpp b/llvm/lib/Object/DXContainer.cpp index 031b9414f4c1a..7b7b8d88c63fc 100644 --- a/llvm/lib/Object/DXContainer.cpp +++ b/llvm/lib/Object/DXContainer.cpp @@ -276,10 +276,13 @@ Error DirectX::RootSignature::parse() { RootParametersOffset, NumParameters * sizeof(dxbc::RTS0::v1::RootParameterHeader)); - StaticSamplers.Stride = sizeof(dxbc::RTS0::v1::StaticSampler); - StaticSamplers.Data = PartData.substr( - StaticSamplersOffset, - NumStaticSamplers * sizeof(dxbc::RTS0::v1::StaticSampler)); + StaticSamplers.Stride = (Version <= 2) + ? sizeof(dxbc::RTS0::v1::StaticSampler) + : sizeof(dxbc::RTS0::v3::StaticSampler); + + StaticSamplers.Data = PartData.substr(StaticSamplersOffset, + static_cast(NumStaticSamplers) * + StaticSamplers.Stride); return Error::success(); } diff --git a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp index 910383816f43b..b00e45d912be1 100644 --- a/llvm/lib/ObjectYAML/DXContainerEmitter.cpp +++ b/llvm/lib/ObjectYAML/DXContainerEmitter.cpp @@ -343,6 +343,9 @@ Error DXContainerWriter::writeParts(raw_ostream &OS) { NewSampler.RegisterSpace = Param.RegisterSpace; NewSampler.ShaderVisibility = Param.ShaderVisibility; + if (RS.Version > 2) + NewSampler.Flags = Param.getEncodedFlags(); + RS.StaticSamplers.push_back(NewSampler); } diff --git a/llvm/lib/ObjectYAML/DXContainerYAML.cpp b/llvm/lib/ObjectYAML/DXContainerYAML.cpp index 22674b1ceb734..3c09ae4e5f2bc 100644 --- a/llvm/lib/ObjectYAML/DXContainerYAML.cpp +++ b/llvm/lib/ObjectYAML/DXContainerYAML.cpp @@ -209,6 +209,11 @@ DXContainerYAML::RootSignatureYamlDesc::create( NewS.RegisterSpace = S.RegisterSpace; NewS.ShaderVisibility = 
dxbc::ShaderVisibility(S.ShaderVisibility); + if (Version > 2) { +#define STATIC_SAMPLER_FLAG(Num, Enum, Flag) \ + NewS.Enum = (S.Flags & llvm::to_underlying(dxbc::StaticSamplerFlags::Enum)); +#include "llvm/BinaryFormat/DXContainerConstants.def" + } RootSigDesc.StaticSamplers.push_back(NewS); } @@ -245,6 +250,15 @@ uint32_t DXContainerYAML::DescriptorRangeYaml::getEncodedFlags() const { return Flags; } +uint32_t DXContainerYAML::StaticSamplerYamlDesc::getEncodedFlags() const { + uint64_t Flags = 0; +#define STATIC_SAMPLER_FLAG(Num, Enum, Flag) \ + if (Enum) \ + Flags |= (uint32_t)dxbc::StaticSamplerFlags::Enum; +#include "llvm/BinaryFormat/DXContainerConstants.def" + return Flags; +} + uint64_t DXContainerYAML::ShaderFeatureFlags::getEncodedFlags() { uint64_t Flag = 0; #define SHADER_FEATURE_FLAG(Num, DxilModuleNum, Val, Str) \ @@ -512,6 +526,9 @@ void MappingTraits::mapping( IO.mapRequired("ShaderRegister", S.ShaderRegister); IO.mapRequired("RegisterSpace", S.RegisterSpace); IO.mapRequired("ShaderVisibility", S.ShaderVisibility); +#define STATIC_SAMPLER_FLAG(Num, Enum, Flag) \ + IO.mapOptional(#Flag, S.Enum, false); +#include "llvm/BinaryFormat/DXContainerConstants.def" } void MappingTraits::mapping(IO &IO, diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp index fc2577e6ada5d..075ad8d7aec8b 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp +++ b/llvm/lib/ProfileData/Coverage/CoverageMappingReader.cpp @@ -949,9 +949,9 @@ loadTestingFormat(StringRef Data, StringRef CompilationDir) { if (Data.size() < sizeof(uint64_t)) return make_error(coveragemap_error::malformed, "the size of data is too small"); - auto TestingVersion = - support::endian::byte_swap( - *reinterpret_cast(Data.data())); + auto TestingVersion = support::endian::byte_swap( + *reinterpret_cast(Data.data()), + llvm::endianness::little); Data = Data.substr(sizeof(uint64_t)); // Read the ProfileNames data. 
@@ -1274,9 +1274,9 @@ BinaryCoverageReader::create( std::vector> Readers; if (ObjectBuffer.getBuffer().size() > sizeof(TestingFormatMagic)) { - uint64_t Magic = - support::endian::byte_swap( - *reinterpret_cast(ObjectBuffer.getBufferStart())); + uint64_t Magic = support::endian::byte_swap( + *reinterpret_cast(ObjectBuffer.getBufferStart()), + llvm::endianness::little); if (Magic == TestingFormatMagic) { // This is a special format used for testing. auto ReaderOrErr = diff --git a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp index 12b1687af69db..3875f01c48528 100644 --- a/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp +++ b/llvm/lib/ProfileData/Coverage/CoverageMappingWriter.cpp @@ -292,7 +292,7 @@ void CoverageMappingWriter::write(raw_ostream &OS) { void TestingFormatWriter::write(raw_ostream &OS, TestingFormatVersion Version) { auto ByteSwap = [](uint64_t N) { - return support::endian::byte_swap(N); + return support::endian::byte_swap(N, llvm::endianness::little); }; // Output a 64bit magic number. 
diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp index e1c6315853b3b..3c8e44a18f533 100644 --- a/llvm/lib/ProfileData/InstrProf.cpp +++ b/llvm/lib/ProfileData/InstrProf.cpp @@ -292,7 +292,7 @@ void ProfOStream::patch(ArrayRef P) { for (const auto &K : P) { for (int I = 0, E = K.D.size(); I != E; I++) { uint64_t Bytes = - endian::byte_swap(K.D[I]); + endian::byte_swap(K.D[I], llvm::endianness::little); Data.replace(K.Pos + I * sizeof(uint64_t), sizeof(uint64_t), (const char *)&Bytes, sizeof(uint64_t)); } diff --git a/llvm/lib/ProfileData/InstrProfReader.cpp b/llvm/lib/ProfileData/InstrProfReader.cpp index 886add7131da2..d2ae4b5226ff6 100644 --- a/llvm/lib/ProfileData/InstrProfReader.cpp +++ b/llvm/lib/ProfileData/InstrProfReader.cpp @@ -1171,8 +1171,8 @@ bool IndexedInstrProfReader::hasFormat(const MemoryBuffer &DataBuffer) { if (DataBuffer.getBufferSize() < 8) return false; - uint64_t Magic = endian::read( - DataBuffer.getBufferStart()); + uint64_t Magic = endian::read(DataBuffer.getBufferStart(), + llvm::endianness::little); // Verify that it's magical. 
return Magic == IndexedInstrProf::Magic; } @@ -1186,10 +1186,10 @@ IndexedInstrProfReader::readSummary(IndexedInstrProf::ProfVersion Version, if (Version >= IndexedInstrProf::Version4) { const IndexedInstrProf::Summary *SummaryInLE = reinterpret_cast(Cur); - uint64_t NFields = endian::byte_swap( - SummaryInLE->NumSummaryFields); - uint64_t NEntries = endian::byte_swap( - SummaryInLE->NumCutoffEntries); + uint64_t NFields = endian::byte_swap( + SummaryInLE->NumSummaryFields, llvm::endianness::little); + uint64_t NEntries = endian::byte_swap( + SummaryInLE->NumCutoffEntries, llvm::endianness::little); uint32_t SummarySize = IndexedInstrProf::Summary::getSize(NFields, NEntries); std::unique_ptr SummaryData = @@ -1198,7 +1198,7 @@ IndexedInstrProfReader::readSummary(IndexedInstrProf::ProfVersion Version, const uint64_t *Src = reinterpret_cast(SummaryInLE); uint64_t *Dst = reinterpret_cast(SummaryData.get()); for (unsigned I = 0; I < SummarySize / sizeof(uint64_t); I++) - Dst[I] = endian::byte_swap(Src[I]); + Dst[I] = endian::byte_swap(Src[I], llvm::endianness::little); SummaryEntryVector DetailedSummary; for (unsigned I = 0; I < SummaryData->NumCutoffEntries; I++) { @@ -1598,8 +1598,8 @@ Error IndexedInstrProfReader::getFunctionBitmap(StringRef FuncName, std::memset(W, 0, sizeof(W)); std::memcpy(W, &BitmapBytes[I], N); I += N; - return support::endian::read(W); + return support::endian::read( + W, llvm::endianness::little); }, Bitmap, Bitmap); assert(I == E); diff --git a/llvm/lib/ProfileData/SampleProfReader.cpp b/llvm/lib/ProfileData/SampleProfReader.cpp index 81ae792e70b99..766c0814ca067 100644 --- a/llvm/lib/ProfileData/SampleProfReader.cpp +++ b/llvm/lib/ProfileData/SampleProfReader.cpp @@ -1290,8 +1290,8 @@ SampleProfileReaderExtBinaryBase::readNameTableSec(bool IsMD5, NameTable.reserve(*Size); for (size_t I = 0; I < *Size; ++I) { using namespace support; - uint64_t FID = endian::read( - Data + I * sizeof(uint64_t)); + uint64_t FID = endian::read( + Data + I * 
sizeof(uint64_t), endianness::little); NameTable.emplace_back(FunctionId(FID)); } if (!ProfileIsCS) diff --git a/llvm/lib/Support/DXILABI.cpp b/llvm/lib/Support/DXILABI.cpp index 082e32061bd45..ba6e16a0181c7 100644 --- a/llvm/lib/Support/DXILABI.cpp +++ b/llvm/lib/Support/DXILABI.cpp @@ -1,33 +1,33 @@ -//===-- DXILABI.cpp - ABI Sensitive Values for DXIL -----------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file contains definitions of various constants and enums that are -// required to remain stable as per the DXIL format's requirements. -// -// Documentation for DXIL can be found in -// https://github.com/Microsoft/DirectXShaderCompiler/blob/main/docs/DXIL.rst. -// -//===----------------------------------------------------------------------===// - -#include "llvm/Support/DXILABI.h" -#include "llvm/Support/ErrorHandling.h" -using namespace llvm; - -StringRef dxil::getResourceClassName(dxil::ResourceClass RC) { - switch (RC) { - case dxil::ResourceClass::SRV: - return "SRV"; - case dxil::ResourceClass::UAV: - return "UAV"; - case dxil::ResourceClass::CBuffer: - return "CBV"; - case dxil::ResourceClass::Sampler: - return "Sampler"; - } - llvm_unreachable("Invalid ResourceClass enum value"); -} +//===-- DXILABI.cpp - ABI Sensitive Values for DXIL -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains definitions of various constants and enums that are +// required to remain stable as per the DXIL format's requirements. +// +// Documentation for DXIL can be found in +// https://github.com/Microsoft/DirectXShaderCompiler/blob/main/docs/DXIL.rst. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/DXILABI.h" +#include "llvm/Support/ErrorHandling.h" +using namespace llvm; + +StringRef dxil::getResourceClassName(dxil::ResourceClass RC) { + switch (RC) { + case dxil::ResourceClass::SRV: + return "SRV"; + case dxil::ResourceClass::UAV: + return "UAV"; + case dxil::ResourceClass::CBuffer: + return "CBV"; + case dxil::ResourceClass::Sampler: + return "Sampler"; + } + llvm_unreachable("Invalid ResourceClass enum value"); +} diff --git a/llvm/lib/Support/FileCollector.cpp b/llvm/lib/Support/FileCollector.cpp index edb5313d43eec..5dc224a6d427b 100644 --- a/llvm/lib/Support/FileCollector.cpp +++ b/llvm/lib/Support/FileCollector.cpp @@ -49,8 +49,9 @@ static bool isCaseSensitivePath(StringRef Path) { return true; } -FileCollector::FileCollector(std::string Root, std::string OverlayRoot) - : Root(Root), OverlayRoot(OverlayRoot) { +FileCollector::FileCollector(std::string Root, std::string OverlayRoot, + IntrusiveRefCntPtr VFS) + : Root(Root), OverlayRoot(OverlayRoot), Canonicalizer(std::move(VFS)) { assert(sys::path::is_absolute(Root) && "Root not absolute"); assert(sys::path::is_absolute(OverlayRoot) && "OverlayRoot not absolute"); } @@ -88,9 +89,9 @@ void FileCollector::PathCanonicalizer::updateWithRealPath( } /// Make Path absolute. -static void makeAbsolute(SmallVectorImpl &Path) { +static void makeAbsolute(vfs::FileSystem &VFS, SmallVectorImpl &Path) { // We need an absolute src path to append to the root. 
- sys::fs::make_absolute(Path); + VFS.makeAbsolute(Path); // Canonicalize src to a native path to avoid mixed separator styles. sys::path::native(Path); @@ -105,7 +106,7 @@ FileCollector::PathCanonicalizer::PathStorage FileCollector::PathCanonicalizer::canonicalize(StringRef SrcPath) { PathStorage Paths; Paths.VirtualPath = SrcPath; - makeAbsolute(Paths.VirtualPath); + makeAbsolute(*VFS, Paths.VirtualPath); // If a ".." component is present after a symlink component, remove_dots may // lead to the wrong real destination path. Let the source be canonicalized diff --git a/llvm/lib/Support/Mustache.cpp b/llvm/lib/Support/Mustache.cpp index be9cbfd46982f..686688ad6c25f 100644 --- a/llvm/lib/Support/Mustache.cpp +++ b/llvm/lib/Support/Mustache.cpp @@ -397,19 +397,32 @@ class EscapeStringStream : public raw_ostream { public: explicit EscapeStringStream(llvm::raw_ostream &WrappedStream, EscapeMap &Escape) - : Escape(Escape), WrappedStream(WrappedStream) { + : Escape(Escape), EscapeChars(Escape.keys().begin(), Escape.keys().end()), + WrappedStream(WrappedStream) { SetUnbuffered(); } protected: void write_impl(const char *Ptr, size_t Size) override { - llvm::StringRef Data(Ptr, Size); - for (char C : Data) { - auto It = Escape.find(C); - if (It != Escape.end()) - WrappedStream << It->getSecond(); - else - WrappedStream << C; + StringRef Data(Ptr, Size); + size_t Start = 0; + while (Start < Size) { + // Find the next character that needs to be escaped. + size_t Next = Data.find_first_of(EscapeChars.str(), Start); + + // If no escapable characters are found, write the rest of the string. + if (Next == StringRef::npos) { + WrappedStream << Data.substr(Start); + return; + } + + // Write the chunk of text before the escapable character. + if (Next > Start) + WrappedStream << Data.substr(Start, Next - Start); + + // Look up and write the escaped version of the character. 
+ WrappedStream << Escape[Data[Next]]; + Start = Next + 1; } } @@ -417,6 +430,7 @@ class EscapeStringStream : public raw_ostream { private: EscapeMap &Escape; + SmallString<8> EscapeChars; llvm::raw_ostream &WrappedStream; }; diff --git a/llvm/lib/Support/rpmalloc/rpmalloc.h b/llvm/lib/Support/rpmalloc/rpmalloc.h index 3911c53b779b3..5b7fe1ff4286b 100644 --- a/llvm/lib/Support/rpmalloc/rpmalloc.h +++ b/llvm/lib/Support/rpmalloc/rpmalloc.h @@ -1,428 +1,428 @@ -//===---------------------- rpmalloc.h ------------------*- C -*-=============// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This library provides a cross-platform lock free thread caching malloc -// implementation in C11. -// -//===----------------------------------------------------------------------===// - -#pragma once - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#if defined(__clang__) || defined(__GNUC__) -#define RPMALLOC_EXPORT __attribute__((visibility("default"))) -#define RPMALLOC_ALLOCATOR -#if (defined(__clang_major__) && (__clang_major__ < 4)) || \ - (defined(__GNUC__) && defined(ENABLE_PRELOAD) && ENABLE_PRELOAD) -#define RPMALLOC_ATTRIB_MALLOC -#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) -#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) -#else -#define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__)) -#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size))) -#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) \ - __attribute__((alloc_size(count, size))) -#endif -#define RPMALLOC_CDECL -#elif defined(_MSC_VER) -#define RPMALLOC_EXPORT -#define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict) -#define RPMALLOC_ATTRIB_MALLOC -#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) -#define 
RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) -#define RPMALLOC_CDECL __cdecl -#else -#define RPMALLOC_EXPORT -#define RPMALLOC_ALLOCATOR -#define RPMALLOC_ATTRIB_MALLOC -#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) -#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) -#define RPMALLOC_CDECL -#endif - -//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes. Will introduce -// a very small overhead due to some size calculations not being compile time -// constants -#ifndef RPMALLOC_CONFIGURABLE -#define RPMALLOC_CONFIGURABLE 0 -#endif - -//! Define RPMALLOC_FIRST_CLASS_HEAPS to enable heap based API (rpmalloc_heap_* -//! functions). -// Will introduce a very small overhead to track fully allocated spans in heaps -#ifndef RPMALLOC_FIRST_CLASS_HEAPS -#define RPMALLOC_FIRST_CLASS_HEAPS 0 -#endif - -//! Flag to rpaligned_realloc to not preserve content in reallocation -#define RPMALLOC_NO_PRESERVE 1 -//! Flag to rpaligned_realloc to fail and return null pointer if grow cannot be -//! done in-place, -// in which case the original pointer is still valid (just like a call to -// realloc which failes to allocate a new block). -#define RPMALLOC_GROW_OR_FAIL 2 - -typedef struct rpmalloc_global_statistics_t { - //! Current amount of virtual memory mapped, all of which might not have been - //! committed (only if ENABLE_STATISTICS=1) - size_t mapped; - //! Peak amount of virtual memory mapped, all of which might not have been - //! committed (only if ENABLE_STATISTICS=1) - size_t mapped_peak; - //! Current amount of memory in global caches for small and medium sizes - //! (<32KiB) - size_t cached; - //! Current amount of memory allocated in huge allocations, i.e larger than - //! LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1) - size_t huge_alloc; - //! Peak amount of memory allocated in huge allocations, i.e larger than - //! LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1) - size_t huge_alloc_peak; - //! 
Total amount of memory mapped since initialization (only if - //! ENABLE_STATISTICS=1) - size_t mapped_total; - //! Total amount of memory unmapped since initialization (only if - //! ENABLE_STATISTICS=1) - size_t unmapped_total; -} rpmalloc_global_statistics_t; - -typedef struct rpmalloc_thread_statistics_t { - //! Current number of bytes available in thread size class caches for small - //! and medium sizes (<32KiB) - size_t sizecache; - //! Current number of bytes available in thread span caches for small and - //! medium sizes (<32KiB) - size_t spancache; - //! Total number of bytes transitioned from thread cache to global cache (only - //! if ENABLE_STATISTICS=1) - size_t thread_to_global; - //! Total number of bytes transitioned from global cache to thread cache (only - //! if ENABLE_STATISTICS=1) - size_t global_to_thread; - //! Per span count statistics (only if ENABLE_STATISTICS=1) - struct { - //! Currently used number of spans - size_t current; - //! High water mark of spans used - size_t peak; - //! Number of spans transitioned to global cache - size_t to_global; - //! Number of spans transitioned from global cache - size_t from_global; - //! Number of spans transitioned to thread cache - size_t to_cache; - //! Number of spans transitioned from thread cache - size_t from_cache; - //! Number of spans transitioned to reserved state - size_t to_reserved; - //! Number of spans transitioned from reserved state - size_t from_reserved; - //! Number of raw memory map calls (not hitting the reserve spans but - //! resulting in actual OS mmap calls) - size_t map_calls; - } span_use[64]; - //! Per size class statistics (only if ENABLE_STATISTICS=1) - struct { - //! Current number of allocations - size_t alloc_current; - //! Peak number of allocations - size_t alloc_peak; - //! Total number of allocations - size_t alloc_total; - //! Total number of frees - size_t free_total; - //! Number of spans transitioned to cache - size_t spans_to_cache; - //! 
Number of spans transitioned from cache - size_t spans_from_cache; - //! Number of spans transitioned from reserved state - size_t spans_from_reserved; - //! Number of raw memory map calls (not hitting the reserve spans but - //! resulting in actual OS mmap calls) - size_t map_calls; - } size_use[128]; -} rpmalloc_thread_statistics_t; - -typedef struct rpmalloc_config_t { - //! Map memory pages for the given number of bytes. The returned address MUST - //! be - // aligned to the rpmalloc span size, which will always be a power of two. - // Optionally the function can store an alignment offset in the offset - // variable in case it performs alignment and the returned pointer is offset - // from the actual start of the memory region due to this alignment. The - // alignment offset will be passed to the memory unmap function. The - // alignment offset MUST NOT be larger than 65535 (storable in an uint16_t), - // if it is you must use natural alignment to shift it into 16 bits. If you - // set a memory_map function, you must also set a memory_unmap function or - // else the default implementation will be used for both. This function must - // be thread safe, it can be called by multiple threads simultaneously. - void *(*memory_map)(size_t size, size_t *offset); - //! Unmap the memory pages starting at address and spanning the given number - //! of bytes. - // If release is set to non-zero, the unmap is for an entire span range as - // returned by a previous call to memory_map and that the entire range should - // be released. The release argument holds the size of the entire span range. - // If release is set to 0, the unmap is a partial decommit of a subset of the - // mapped memory range. If you set a memory_unmap function, you must also set - // a memory_map function or else the default implementation will be used for - // both. This function must be thread safe, it can be called by multiple - // threads simultaneously. 
- void (*memory_unmap)(void *address, size_t size, size_t offset, - size_t release); - //! Called when an assert fails, if asserts are enabled. Will use the standard - //! assert() - // if this is not set. - void (*error_callback)(const char *message); - //! Called when a call to map memory pages fails (out of memory). If this - //! callback is - // not set or returns zero the library will return a null pointer in the - // allocation call. If this callback returns non-zero the map call will be - // retried. The argument passed is the number of bytes that was requested in - // the map call. Only used if the default system memory map function is used - // (memory_map callback is not set). - int (*map_fail_callback)(size_t size); - //! Size of memory pages. The page size MUST be a power of two. All memory - //! mapping - // requests to memory_map will be made with size set to a multiple of the - // page size. Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system - // page size is used. - size_t page_size; - //! Size of a span of memory blocks. MUST be a power of two, and in - //! [4096,262144] - // range (unless 0 - set to 0 to use the default span size). Used if - // RPMALLOC_CONFIGURABLE is defined to 1. - size_t span_size; - //! Number of spans to map at each request to map new virtual memory blocks. - //! This can - // be used to minimize the system call overhead at the cost of virtual memory - // address space. The extra mapped pages will not be written until actually - // used, so physical committed memory should not be affected in the default - // implementation. Will be aligned to a multiple of spans that match memory - // page size in case of huge pages. - size_t span_map_count; - //! Enable use of large/huge pages. If this flag is set to non-zero and page - //! size is - // zero, the allocator will try to enable huge pages and auto detect the - // configuration. 
If this is set to non-zero and page_size is also non-zero, - // the allocator will assume huge pages have been configured and enabled - // prior to initializing the allocator. For Windows, see - // https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support - // For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt - int enable_huge_pages; - //! Respectively allocated pages and huge allocated pages names for systems - // supporting it to be able to distinguish among anonymous regions. - const char *page_name; - const char *huge_page_name; -} rpmalloc_config_t; - -//! Initialize allocator with default configuration -RPMALLOC_EXPORT int rpmalloc_initialize(void); - -//! Initialize allocator with given configuration -RPMALLOC_EXPORT int rpmalloc_initialize_config(const rpmalloc_config_t *config); - -//! Get allocator configuration -RPMALLOC_EXPORT const rpmalloc_config_t *rpmalloc_config(void); - -//! Finalize allocator -RPMALLOC_EXPORT void rpmalloc_finalize(void); - -//! Initialize allocator for calling thread -RPMALLOC_EXPORT void rpmalloc_thread_initialize(void); - -//! Finalize allocator for calling thread -RPMALLOC_EXPORT void rpmalloc_thread_finalize(int release_caches); - -//! Perform deferred deallocations pending for the calling thread heap -RPMALLOC_EXPORT void rpmalloc_thread_collect(void); - -//! Query if allocator is initialized for calling thread -RPMALLOC_EXPORT int rpmalloc_is_thread_initialized(void); - -//! Get per-thread statistics -RPMALLOC_EXPORT void -rpmalloc_thread_statistics(rpmalloc_thread_statistics_t *stats); - -//! Get global statistics -RPMALLOC_EXPORT void -rpmalloc_global_statistics(rpmalloc_global_statistics_t *stats); - -//! Dump all statistics in human readable format to file (should be a FILE*) -RPMALLOC_EXPORT void rpmalloc_dump_statistics(void *file); - -//! 
Allocate a memory block of at least the given size -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1); - -//! Free the given memory block -RPMALLOC_EXPORT void rpfree(void *ptr); - -//! Allocate a memory block of at least the given size and zero initialize it -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2); - -//! Reallocate the given block to at least the given size -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rprealloc(void *ptr, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(2); - -//! Reallocate the given block to at least the given size and alignment, -// with optional control flags (see RPMALLOC_NO_PRESERVE). -// Alignment must be a power of two and a multiple of sizeof(void*), -// and should ideally be less than memory page size. A caveat of rpmalloc -// internals is that this must also be strictly less than the span size -// (default 64KiB) -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpaligned_realloc(void *ptr, size_t alignment, size_t size, size_t oldsize, - unsigned int flags) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(3); - -//! Allocate a memory block of at least the given size and alignment. -// Alignment must be a power of two and a multiple of sizeof(void*), -// and should ideally be less than memory page size. A caveat of rpmalloc -// internals is that this must also be strictly less than the span size -// (default 64KiB) -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(2); - -//! Allocate a memory block of at least the given size and alignment, and zero -//! initialize it. -// Alignment must be a power of two and a multiple of sizeof(void*), -// and should ideally be less than memory page size. 
A caveat of rpmalloc -// internals is that this must also be strictly less than the span size -// (default 64KiB) -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpaligned_calloc(size_t alignment, size_t num, - size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); - -//! Allocate a memory block of at least the given size and alignment. -// Alignment must be a power of two and a multiple of sizeof(void*), -// and should ideally be less than memory page size. A caveat of rpmalloc -// internals is that this must also be strictly less than the span size -// (default 64KiB) -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(2); - -//! Allocate a memory block of at least the given size and alignment. -// Alignment must be a power of two and a multiple of sizeof(void*), -// and should ideally be less than memory page size. A caveat of rpmalloc -// internals is that this must also be strictly less than the span size -// (default 64KiB) -RPMALLOC_EXPORT int rpposix_memalign(void **memptr, size_t alignment, - size_t size); - -//! Query the usable size of the given memory block (from given pointer to the -//! end of block) -RPMALLOC_EXPORT size_t rpmalloc_usable_size(void *ptr); - -//! Dummy empty function for forcing linker symbol inclusion -RPMALLOC_EXPORT void rpmalloc_linker_reference(void); - -#if RPMALLOC_FIRST_CLASS_HEAPS - -//! Heap type -typedef struct heap_t rpmalloc_heap_t; - -//! Acquire a new heap. Will reuse existing released heaps or allocate memory -//! for a new heap -// if none available. Heap API is implemented with the strict assumption that -// only one single thread will call heap functions for a given heap at any -// given time, no functions are thread safe. -RPMALLOC_EXPORT rpmalloc_heap_t *rpmalloc_heap_acquire(void); - -//! Release a heap (does NOT free the memory allocated by the heap, use -//! rpmalloc_heap_free_all before destroying the heap). 
-// Releasing a heap will enable it to be reused by other threads. Safe to pass -// a null pointer. -RPMALLOC_EXPORT void rpmalloc_heap_release(rpmalloc_heap_t *heap); - -//! Allocate a memory block of at least the given size using the given heap. -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc_heap_alloc(rpmalloc_heap_t *heap, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(2); - -//! Allocate a memory block of at least the given size using the given heap. The -//! returned -// block will have the requested alignment. Alignment must be a power of two -// and a multiple of sizeof(void*), and should ideally be less than memory page -// size. A caveat of rpmalloc internals is that this must also be strictly less -// than the span size (default 64KiB). -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc_heap_aligned_alloc(rpmalloc_heap_t *heap, size_t alignment, - size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(3); - -//! Allocate a memory block of at least the given size using the given heap and -//! zero initialize it. -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc_heap_calloc(rpmalloc_heap_t *heap, size_t num, - size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); - -//! Allocate a memory block of at least the given size using the given heap and -//! zero initialize it. The returned -// block will have the requested alignment. Alignment must either be zero, or a -// power of two and a multiple of sizeof(void*), and should ideally be less -// than memory page size. A caveat of rpmalloc internals is that this must also -// be strictly less than the span size (default 64KiB). -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc_heap_aligned_calloc(rpmalloc_heap_t *heap, size_t alignment, - size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); - -//! Reallocate the given block to at least the given size. The memory block MUST -//! 
be allocated -// by the same heap given to this function. -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * -rpmalloc_heap_realloc(rpmalloc_heap_t *heap, void *ptr, size_t size, - unsigned int flags) RPMALLOC_ATTRIB_MALLOC - RPMALLOC_ATTRIB_ALLOC_SIZE(3); - -//! Reallocate the given block to at least the given size. The memory block MUST -//! be allocated -// by the same heap given to this function. The returned block will have the -// requested alignment. Alignment must be either zero, or a power of two and a -// multiple of sizeof(void*), and should ideally be less than memory page size. -// A caveat of rpmalloc internals is that this must also be strictly less than -// the span size (default 64KiB). -RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *rpmalloc_heap_aligned_realloc( - rpmalloc_heap_t *heap, void *ptr, size_t alignment, size_t size, - unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(4); - -//! Free the given memory block from the given heap. The memory block MUST be -//! allocated -// by the same heap given to this function. -RPMALLOC_EXPORT void rpmalloc_heap_free(rpmalloc_heap_t *heap, void *ptr); - -//! Free all memory allocated by the heap -RPMALLOC_EXPORT void rpmalloc_heap_free_all(rpmalloc_heap_t *heap); - -//! Set the given heap as the current heap for the calling thread. A heap MUST -//! only be current heap -// for a single thread, a heap can never be shared between multiple threads. -// The previous current heap for the calling thread is released to be reused by -// other threads. -RPMALLOC_EXPORT void rpmalloc_heap_thread_set_current(rpmalloc_heap_t *heap); - -//! Returns which heap the given pointer is allocated on -RPMALLOC_EXPORT rpmalloc_heap_t *rpmalloc_get_heap_for_ptr(void *ptr); - -#endif - -#ifdef __cplusplus -} -#endif +//===---------------------- rpmalloc.h ------------------*- C -*-=============// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This library provides a cross-platform lock free thread caching malloc +// implementation in C11. +// +//===----------------------------------------------------------------------===// + +#pragma once + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__clang__) || defined(__GNUC__) +#define RPMALLOC_EXPORT __attribute__((visibility("default"))) +#define RPMALLOC_ALLOCATOR +#if (defined(__clang_major__) && (__clang_major__ < 4)) || \ + (defined(__GNUC__) && defined(ENABLE_PRELOAD) && ENABLE_PRELOAD) +#define RPMALLOC_ATTRIB_MALLOC +#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) +#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) +#else +#define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__)) +#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size))) +#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) \ + __attribute__((alloc_size(count, size))) +#endif +#define RPMALLOC_CDECL +#elif defined(_MSC_VER) +#define RPMALLOC_EXPORT +#define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict) +#define RPMALLOC_ATTRIB_MALLOC +#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) +#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) +#define RPMALLOC_CDECL __cdecl +#else +#define RPMALLOC_EXPORT +#define RPMALLOC_ALLOCATOR +#define RPMALLOC_ATTRIB_MALLOC +#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) +#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) +#define RPMALLOC_CDECL +#endif + +//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes. Will introduce +// a very small overhead due to some size calculations not being compile time +// constants +#ifndef RPMALLOC_CONFIGURABLE +#define RPMALLOC_CONFIGURABLE 0 +#endif + +//! Define RPMALLOC_FIRST_CLASS_HEAPS to enable heap based API (rpmalloc_heap_* +//! functions). 
+// Will introduce a very small overhead to track fully allocated spans in heaps +#ifndef RPMALLOC_FIRST_CLASS_HEAPS +#define RPMALLOC_FIRST_CLASS_HEAPS 0 +#endif + +//! Flag to rpaligned_realloc to not preserve content in reallocation +#define RPMALLOC_NO_PRESERVE 1 +//! Flag to rpaligned_realloc to fail and return null pointer if grow cannot be +//! done in-place, +// in which case the original pointer is still valid (just like a call to +// realloc which failes to allocate a new block). +#define RPMALLOC_GROW_OR_FAIL 2 + +typedef struct rpmalloc_global_statistics_t { + //! Current amount of virtual memory mapped, all of which might not have been + //! committed (only if ENABLE_STATISTICS=1) + size_t mapped; + //! Peak amount of virtual memory mapped, all of which might not have been + //! committed (only if ENABLE_STATISTICS=1) + size_t mapped_peak; + //! Current amount of memory in global caches for small and medium sizes + //! (<32KiB) + size_t cached; + //! Current amount of memory allocated in huge allocations, i.e larger than + //! LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1) + size_t huge_alloc; + //! Peak amount of memory allocated in huge allocations, i.e larger than + //! LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1) + size_t huge_alloc_peak; + //! Total amount of memory mapped since initialization (only if + //! ENABLE_STATISTICS=1) + size_t mapped_total; + //! Total amount of memory unmapped since initialization (only if + //! ENABLE_STATISTICS=1) + size_t unmapped_total; +} rpmalloc_global_statistics_t; + +typedef struct rpmalloc_thread_statistics_t { + //! Current number of bytes available in thread size class caches for small + //! and medium sizes (<32KiB) + size_t sizecache; + //! Current number of bytes available in thread span caches for small and + //! medium sizes (<32KiB) + size_t spancache; + //! Total number of bytes transitioned from thread cache to global cache (only + //! 
if ENABLE_STATISTICS=1) + size_t thread_to_global; + //! Total number of bytes transitioned from global cache to thread cache (only + //! if ENABLE_STATISTICS=1) + size_t global_to_thread; + //! Per span count statistics (only if ENABLE_STATISTICS=1) + struct { + //! Currently used number of spans + size_t current; + //! High water mark of spans used + size_t peak; + //! Number of spans transitioned to global cache + size_t to_global; + //! Number of spans transitioned from global cache + size_t from_global; + //! Number of spans transitioned to thread cache + size_t to_cache; + //! Number of spans transitioned from thread cache + size_t from_cache; + //! Number of spans transitioned to reserved state + size_t to_reserved; + //! Number of spans transitioned from reserved state + size_t from_reserved; + //! Number of raw memory map calls (not hitting the reserve spans but + //! resulting in actual OS mmap calls) + size_t map_calls; + } span_use[64]; + //! Per size class statistics (only if ENABLE_STATISTICS=1) + struct { + //! Current number of allocations + size_t alloc_current; + //! Peak number of allocations + size_t alloc_peak; + //! Total number of allocations + size_t alloc_total; + //! Total number of frees + size_t free_total; + //! Number of spans transitioned to cache + size_t spans_to_cache; + //! Number of spans transitioned from cache + size_t spans_from_cache; + //! Number of spans transitioned from reserved state + size_t spans_from_reserved; + //! Number of raw memory map calls (not hitting the reserve spans but + //! resulting in actual OS mmap calls) + size_t map_calls; + } size_use[128]; +} rpmalloc_thread_statistics_t; + +typedef struct rpmalloc_config_t { + //! Map memory pages for the given number of bytes. The returned address MUST + //! be + // aligned to the rpmalloc span size, which will always be a power of two. 
+ // Optionally the function can store an alignment offset in the offset + // variable in case it performs alignment and the returned pointer is offset + // from the actual start of the memory region due to this alignment. The + // alignment offset will be passed to the memory unmap function. The + // alignment offset MUST NOT be larger than 65535 (storable in an uint16_t), + // if it is you must use natural alignment to shift it into 16 bits. If you + // set a memory_map function, you must also set a memory_unmap function or + // else the default implementation will be used for both. This function must + // be thread safe, it can be called by multiple threads simultaneously. + void *(*memory_map)(size_t size, size_t *offset); + //! Unmap the memory pages starting at address and spanning the given number + //! of bytes. + // If release is set to non-zero, the unmap is for an entire span range as + // returned by a previous call to memory_map and that the entire range should + // be released. The release argument holds the size of the entire span range. + // If release is set to 0, the unmap is a partial decommit of a subset of the + // mapped memory range. If you set a memory_unmap function, you must also set + // a memory_map function or else the default implementation will be used for + // both. This function must be thread safe, it can be called by multiple + // threads simultaneously. + void (*memory_unmap)(void *address, size_t size, size_t offset, + size_t release); + //! Called when an assert fails, if asserts are enabled. Will use the standard + //! assert() + // if this is not set. + void (*error_callback)(const char *message); + //! Called when a call to map memory pages fails (out of memory). If this + //! callback is + // not set or returns zero the library will return a null pointer in the + // allocation call. If this callback returns non-zero the map call will be + // retried. 
The argument passed is the number of bytes that was requested in + // the map call. Only used if the default system memory map function is used + // (memory_map callback is not set). + int (*map_fail_callback)(size_t size); + //! Size of memory pages. The page size MUST be a power of two. All memory + //! mapping + // requests to memory_map will be made with size set to a multiple of the + // page size. Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system + // page size is used. + size_t page_size; + //! Size of a span of memory blocks. MUST be a power of two, and in + //! [4096,262144] + // range (unless 0 - set to 0 to use the default span size). Used if + // RPMALLOC_CONFIGURABLE is defined to 1. + size_t span_size; + //! Number of spans to map at each request to map new virtual memory blocks. + //! This can + // be used to minimize the system call overhead at the cost of virtual memory + // address space. The extra mapped pages will not be written until actually + // used, so physical committed memory should not be affected in the default + // implementation. Will be aligned to a multiple of spans that match memory + // page size in case of huge pages. + size_t span_map_count; + //! Enable use of large/huge pages. If this flag is set to non-zero and page + //! size is + // zero, the allocator will try to enable huge pages and auto detect the + // configuration. If this is set to non-zero and page_size is also non-zero, + // the allocator will assume huge pages have been configured and enabled + // prior to initializing the allocator. For Windows, see + // https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support + // For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt + int enable_huge_pages; + //! Respectively allocated pages and huge allocated pages names for systems + // supporting it to be able to distinguish among anonymous regions. 
+ const char *page_name; + const char *huge_page_name; +} rpmalloc_config_t; + +//! Initialize allocator with default configuration +RPMALLOC_EXPORT int rpmalloc_initialize(void); + +//! Initialize allocator with given configuration +RPMALLOC_EXPORT int rpmalloc_initialize_config(const rpmalloc_config_t *config); + +//! Get allocator configuration +RPMALLOC_EXPORT const rpmalloc_config_t *rpmalloc_config(void); + +//! Finalize allocator +RPMALLOC_EXPORT void rpmalloc_finalize(void); + +//! Initialize allocator for calling thread +RPMALLOC_EXPORT void rpmalloc_thread_initialize(void); + +//! Finalize allocator for calling thread +RPMALLOC_EXPORT void rpmalloc_thread_finalize(int release_caches); + +//! Perform deferred deallocations pending for the calling thread heap +RPMALLOC_EXPORT void rpmalloc_thread_collect(void); + +//! Query if allocator is initialized for calling thread +RPMALLOC_EXPORT int rpmalloc_is_thread_initialized(void); + +//! Get per-thread statistics +RPMALLOC_EXPORT void +rpmalloc_thread_statistics(rpmalloc_thread_statistics_t *stats); + +//! Get global statistics +RPMALLOC_EXPORT void +rpmalloc_global_statistics(rpmalloc_global_statistics_t *stats); + +//! Dump all statistics in human readable format to file (should be a FILE*) +RPMALLOC_EXPORT void rpmalloc_dump_statistics(void *file); + +//! Allocate a memory block of at least the given size +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1); + +//! Free the given memory block +RPMALLOC_EXPORT void rpfree(void *ptr); + +//! Allocate a memory block of at least the given size and zero initialize it +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2); + +//! 
Reallocate the given block to at least the given size +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rprealloc(void *ptr, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(2); + +//! Reallocate the given block to at least the given size and alignment, +// with optional control flags (see RPMALLOC_NO_PRESERVE). +// Alignment must be a power of two and a multiple of sizeof(void*), +// and should ideally be less than memory page size. A caveat of rpmalloc +// internals is that this must also be strictly less than the span size +// (default 64KiB) +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpaligned_realloc(void *ptr, size_t alignment, size_t size, size_t oldsize, + unsigned int flags) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(3); + +//! Allocate a memory block of at least the given size and alignment. +// Alignment must be a power of two and a multiple of sizeof(void*), +// and should ideally be less than memory page size. A caveat of rpmalloc +// internals is that this must also be strictly less than the span size +// (default 64KiB) +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(2); + +//! Allocate a memory block of at least the given size and alignment, and zero +//! initialize it. +// Alignment must be a power of two and a multiple of sizeof(void*), +// and should ideally be less than memory page size. A caveat of rpmalloc +// internals is that this must also be strictly less than the span size +// (default 64KiB) +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpaligned_calloc(size_t alignment, size_t num, + size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); + +//! Allocate a memory block of at least the given size and alignment. +// Alignment must be a power of two and a multiple of sizeof(void*), +// and should ideally be less than memory page size. 
A caveat of rpmalloc +// internals is that this must also be strictly less than the span size +// (default 64KiB) +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(2); + +//! Allocate a memory block of at least the given size and alignment. +// Alignment must be a power of two and a multiple of sizeof(void*), +// and should ideally be less than memory page size. A caveat of rpmalloc +// internals is that this must also be strictly less than the span size +// (default 64KiB) +RPMALLOC_EXPORT int rpposix_memalign(void **memptr, size_t alignment, + size_t size); + +//! Query the usable size of the given memory block (from given pointer to the +//! end of block) +RPMALLOC_EXPORT size_t rpmalloc_usable_size(void *ptr); + +//! Dummy empty function for forcing linker symbol inclusion +RPMALLOC_EXPORT void rpmalloc_linker_reference(void); + +#if RPMALLOC_FIRST_CLASS_HEAPS + +//! Heap type +typedef struct heap_t rpmalloc_heap_t; + +//! Acquire a new heap. Will reuse existing released heaps or allocate memory +//! for a new heap +// if none available. Heap API is implemented with the strict assumption that +// only one single thread will call heap functions for a given heap at any +// given time, no functions are thread safe. +RPMALLOC_EXPORT rpmalloc_heap_t *rpmalloc_heap_acquire(void); + +//! Release a heap (does NOT free the memory allocated by the heap, use +//! rpmalloc_heap_free_all before destroying the heap). +// Releasing a heap will enable it to be reused by other threads. Safe to pass +// a null pointer. +RPMALLOC_EXPORT void rpmalloc_heap_release(rpmalloc_heap_t *heap); + +//! Allocate a memory block of at least the given size using the given heap. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc_heap_alloc(rpmalloc_heap_t *heap, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(2); + +//! 
Allocate a memory block of at least the given size using the given heap. The +//! returned +// block will have the requested alignment. Alignment must be a power of two +// and a multiple of sizeof(void*), and should ideally be less than memory page +// size. A caveat of rpmalloc internals is that this must also be strictly less +// than the span size (default 64KiB). +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc_heap_aligned_alloc(rpmalloc_heap_t *heap, size_t alignment, + size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(3); + +//! Allocate a memory block of at least the given size using the given heap and +//! zero initialize it. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc_heap_calloc(rpmalloc_heap_t *heap, size_t num, + size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); + +//! Allocate a memory block of at least the given size using the given heap and +//! zero initialize it. The returned +// block will have the requested alignment. Alignment must either be zero, or a +// power of two and a multiple of sizeof(void*), and should ideally be less +// than memory page size. A caveat of rpmalloc internals is that this must also +// be strictly less than the span size (default 64KiB). +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc_heap_aligned_calloc(rpmalloc_heap_t *heap, size_t alignment, + size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3); + +//! Reallocate the given block to at least the given size. The memory block MUST +//! be allocated +// by the same heap given to this function. +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void * +rpmalloc_heap_realloc(rpmalloc_heap_t *heap, void *ptr, size_t size, + unsigned int flags) RPMALLOC_ATTRIB_MALLOC + RPMALLOC_ATTRIB_ALLOC_SIZE(3); + +//! Reallocate the given block to at least the given size. The memory block MUST +//! be allocated +// by the same heap given to this function. The returned block will have the +// requested alignment. 
Alignment must be either zero, or a power of two and a +// multiple of sizeof(void*), and should ideally be less than memory page size. +// A caveat of rpmalloc internals is that this must also be strictly less than +// the span size (default 64KiB). +RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *rpmalloc_heap_aligned_realloc( + rpmalloc_heap_t *heap, void *ptr, size_t alignment, size_t size, + unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(4); + +//! Free the given memory block from the given heap. The memory block MUST be +//! allocated +// by the same heap given to this function. +RPMALLOC_EXPORT void rpmalloc_heap_free(rpmalloc_heap_t *heap, void *ptr); + +//! Free all memory allocated by the heap +RPMALLOC_EXPORT void rpmalloc_heap_free_all(rpmalloc_heap_t *heap); + +//! Set the given heap as the current heap for the calling thread. A heap MUST +//! only be current heap +// for a single thread, a heap can never be shared between multiple threads. +// The previous current heap for the calling thread is released to be reused by +// other threads. +RPMALLOC_EXPORT void rpmalloc_heap_thread_set_current(rpmalloc_heap_t *heap); + +//! Returns which heap the given pointer is allocated on +RPMALLOC_EXPORT rpmalloc_heap_t *rpmalloc_get_heap_for_ptr(void *ptr); + +#endif + +#ifdef __cplusplus +} +#endif diff --git a/llvm/lib/Support/rpmalloc/rpnew.h b/llvm/lib/Support/rpmalloc/rpnew.h index d8303c6f95652..a18f0799d56d1 100644 --- a/llvm/lib/Support/rpmalloc/rpnew.h +++ b/llvm/lib/Support/rpmalloc/rpnew.h @@ -1,113 +1,113 @@ -//===-------------------------- rpnew.h -----------------*- C -*-=============// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This library provides a cross-platform lock free thread caching malloc -// implementation in C11. -// -//===----------------------------------------------------------------------===// - -#ifdef __cplusplus - -#include -#include - -#ifndef __CRTDECL -#define __CRTDECL -#endif - -extern void __CRTDECL operator delete(void *p) noexcept { rpfree(p); } - -extern void __CRTDECL operator delete[](void *p) noexcept { rpfree(p); } - -extern void *__CRTDECL operator new(std::size_t size) noexcept(false) { - return rpmalloc(size); -} - -extern void *__CRTDECL operator new[](std::size_t size) noexcept(false) { - return rpmalloc(size); -} - -extern void *__CRTDECL operator new(std::size_t size, - const std::nothrow_t &tag) noexcept { - (void)sizeof(tag); - return rpmalloc(size); -} - -extern void *__CRTDECL operator new[](std::size_t size, - const std::nothrow_t &tag) noexcept { - (void)sizeof(tag); - return rpmalloc(size); -} - -#if (__cplusplus >= 201402L || _MSC_VER >= 1916) - -extern void __CRTDECL operator delete(void *p, std::size_t size) noexcept { - (void)sizeof(size); - rpfree(p); -} - -extern void __CRTDECL operator delete[](void *p, std::size_t size) noexcept { - (void)sizeof(size); - rpfree(p); -} - -#endif - -#if (__cplusplus > 201402L || defined(__cpp_aligned_new)) - -extern void __CRTDECL operator delete(void *p, - std::align_val_t align) noexcept { - (void)sizeof(align); - rpfree(p); -} - -extern void __CRTDECL operator delete[](void *p, - std::align_val_t align) noexcept { - (void)sizeof(align); - rpfree(p); -} - -extern void __CRTDECL operator delete(void *p, std::size_t size, - std::align_val_t align) noexcept { - (void)sizeof(size); - (void)sizeof(align); - rpfree(p); -} - -extern void __CRTDECL operator delete[](void *p, std::size_t size, - std::align_val_t align) noexcept { - (void)sizeof(size); - 
(void)sizeof(align); - rpfree(p); -} - -extern void *__CRTDECL operator new(std::size_t size, - std::align_val_t align) noexcept(false) { - return rpaligned_alloc(static_cast(align), size); -} - -extern void *__CRTDECL operator new[](std::size_t size, - std::align_val_t align) noexcept(false) { - return rpaligned_alloc(static_cast(align), size); -} - -extern void *__CRTDECL operator new(std::size_t size, std::align_val_t align, - const std::nothrow_t &tag) noexcept { - (void)sizeof(tag); - return rpaligned_alloc(static_cast(align), size); -} - -extern void *__CRTDECL operator new[](std::size_t size, std::align_val_t align, - const std::nothrow_t &tag) noexcept { - (void)sizeof(tag); - return rpaligned_alloc(static_cast(align), size); -} - -#endif - -#endif +//===-------------------------- rpnew.h -----------------*- C -*-=============// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This library provides a cross-platform lock free thread caching malloc +// implementation in C11. 
+// +//===----------------------------------------------------------------------===// + +#ifdef __cplusplus + +#include +#include + +#ifndef __CRTDECL +#define __CRTDECL +#endif + +extern void __CRTDECL operator delete(void *p) noexcept { rpfree(p); } + +extern void __CRTDECL operator delete[](void *p) noexcept { rpfree(p); } + +extern void *__CRTDECL operator new(std::size_t size) noexcept(false) { + return rpmalloc(size); +} + +extern void *__CRTDECL operator new[](std::size_t size) noexcept(false) { + return rpmalloc(size); +} + +extern void *__CRTDECL operator new(std::size_t size, + const std::nothrow_t &tag) noexcept { + (void)sizeof(tag); + return rpmalloc(size); +} + +extern void *__CRTDECL operator new[](std::size_t size, + const std::nothrow_t &tag) noexcept { + (void)sizeof(tag); + return rpmalloc(size); +} + +#if (__cplusplus >= 201402L || _MSC_VER >= 1916) + +extern void __CRTDECL operator delete(void *p, std::size_t size) noexcept { + (void)sizeof(size); + rpfree(p); +} + +extern void __CRTDECL operator delete[](void *p, std::size_t size) noexcept { + (void)sizeof(size); + rpfree(p); +} + +#endif + +#if (__cplusplus > 201402L || defined(__cpp_aligned_new)) + +extern void __CRTDECL operator delete(void *p, + std::align_val_t align) noexcept { + (void)sizeof(align); + rpfree(p); +} + +extern void __CRTDECL operator delete[](void *p, + std::align_val_t align) noexcept { + (void)sizeof(align); + rpfree(p); +} + +extern void __CRTDECL operator delete(void *p, std::size_t size, + std::align_val_t align) noexcept { + (void)sizeof(size); + (void)sizeof(align); + rpfree(p); +} + +extern void __CRTDECL operator delete[](void *p, std::size_t size, + std::align_val_t align) noexcept { + (void)sizeof(size); + (void)sizeof(align); + rpfree(p); +} + +extern void *__CRTDECL operator new(std::size_t size, + std::align_val_t align) noexcept(false) { + return rpaligned_alloc(static_cast(align), size); +} + +extern void *__CRTDECL operator new[](std::size_t size, + 
std::align_val_t align) noexcept(false) { + return rpaligned_alloc(static_cast(align), size); +} + +extern void *__CRTDECL operator new(std::size_t size, std::align_val_t align, + const std::nothrow_t &tag) noexcept { + (void)sizeof(tag); + return rpaligned_alloc(static_cast(align), size); +} + +extern void *__CRTDECL operator new[](std::size_t size, std::align_val_t align, + const std::nothrow_t &tag) noexcept { + (void)sizeof(tag); + return rpaligned_alloc(static_cast(align), size); +} + +#endif + +#endif diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index a4c1e265f0e63..2ffc36706fb64 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -8086,13 +8086,76 @@ static SDValue getZT0FrameIndex(MachineFrameInfo &MFI, DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout())); } +// Emit a call to __arm_sme_save or __arm_sme_restore. +static SDValue emitSMEStateSaveRestore(const AArch64TargetLowering &TLI, + SelectionDAG &DAG, + AArch64FunctionInfo *Info, SDLoc DL, + SDValue Chain, bool IsSave) { + MachineFunction &MF = DAG.getMachineFunction(); + AArch64FunctionInfo *FuncInfo = MF.getInfo(); + FuncInfo->setSMESaveBufferUsed(); + TargetLowering::ArgListTy Args; + Args.emplace_back( + DAG.getCopyFromReg(Chain, DL, Info->getSMESaveBufferAddr(), MVT::i64), + PointerType::getUnqual(*DAG.getContext())); + + RTLIB::Libcall LC = + IsSave ? 
RTLIB::SMEABI_SME_SAVE : RTLIB::SMEABI_SME_RESTORE; + SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), + TLI.getPointerTy(DAG.getDataLayout())); + auto *RetTy = Type::getVoidTy(*DAG.getContext()); + TargetLowering::CallLoweringInfo CLI(DAG); + CLI.setDebugLoc(DL).setChain(Chain).setLibCallee( + TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)); + return TLI.LowerCallTo(CLI).second; +} + +static SDValue emitRestoreZALazySave(SDValue Chain, SDLoc DL, + const AArch64TargetLowering &TLI, + const AArch64RegisterInfo &TRI, + AArch64FunctionInfo &FuncInfo, + SelectionDAG &DAG) { + // Conditionally restore the lazy save using a pseudo node. + RTLIB::Libcall LC = RTLIB::SMEABI_TPIDR2_RESTORE; + TPIDR2Object &TPIDR2 = FuncInfo.getTPIDR2Obj(); + SDValue RegMask = DAG.getRegisterMask(TRI.getCallPreservedMask( + DAG.getMachineFunction(), TLI.getLibcallCallingConv(LC))); + SDValue RestoreRoutine = DAG.getTargetExternalSymbol( + TLI.getLibcallName(LC), TLI.getPointerTy(DAG.getDataLayout())); + SDValue TPIDR2_EL0 = DAG.getNode( + ISD::INTRINSIC_W_CHAIN, DL, MVT::i64, Chain, + DAG.getConstant(Intrinsic::aarch64_sme_get_tpidr2, DL, MVT::i32)); + // Copy the address of the TPIDR2 block into X0 before 'calling' the + // RESTORE_ZA pseudo. + SDValue Glue; + SDValue TPIDR2Block = DAG.getFrameIndex( + TPIDR2.FrameIndex, + DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout())); + Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, TPIDR2Block, Glue); + Chain = + DAG.getNode(AArch64ISD::RESTORE_ZA, DL, MVT::Other, + {Chain, TPIDR2_EL0, DAG.getRegister(AArch64::X0, MVT::i64), + RestoreRoutine, RegMask, Chain.getValue(1)}); + // Finally reset the TPIDR2_EL0 register to 0. 
+ Chain = DAG.getNode( + ISD::INTRINSIC_VOID, DL, MVT::Other, Chain, + DAG.getConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32), + DAG.getConstant(0, DL, MVT::i64)); + TPIDR2.Uses++; + return Chain; +} + SDValue AArch64TargetLowering::lowerEHPadEntry(SDValue Chain, SDLoc const &DL, SelectionDAG &DAG) const { assert(Chain.getOpcode() == ISD::EntryToken && "Unexpected Chain value"); SDValue Glue = Chain.getValue(1); MachineFunction &MF = DAG.getMachineFunction(); - SMEAttrs SMEFnAttrs = MF.getInfo()->getSMEFnAttrs(); + auto &FuncInfo = *MF.getInfo(); + auto &Subtarget = DAG.getSubtarget(); + const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo(); + + SMEAttrs SMEFnAttrs = FuncInfo.getSMEFnAttrs(); // The following conditions are true on entry to an exception handler: // - PSTATE.SM is 0. @@ -8107,14 +8170,43 @@ SDValue AArch64TargetLowering::lowerEHPadEntry(SDValue Chain, SDLoc const &DL, // These mode changes are usually optimized away in catch blocks as they // occur before the __cxa_begin_catch (which is a non-streaming function), // but are necessary in some cases (such as for cleanups). + // + // Additionally, if the function has ZA or ZT0 state, we must restore it. 
+ // [COND_]SMSTART SM if (SMEFnAttrs.hasStreamingInterfaceOrBody()) - return changeStreamingMode(DAG, DL, /*Enable=*/true, Chain, - /*Glue*/ Glue, AArch64SME::Always); + Chain = changeStreamingMode(DAG, DL, /*Enable=*/true, Chain, + /*Glue*/ Glue, AArch64SME::Always); + else if (SMEFnAttrs.hasStreamingCompatibleInterface()) + Chain = changeStreamingMode(DAG, DL, /*Enable=*/true, Chain, Glue, + AArch64SME::IfCallerIsStreaming); + + if (getTM().useNewSMEABILowering()) + return Chain; - if (SMEFnAttrs.hasStreamingCompatibleInterface()) - return changeStreamingMode(DAG, DL, /*Enable=*/true, Chain, Glue, - AArch64SME::IfCallerIsStreaming); + if (SMEFnAttrs.hasAgnosticZAInterface()) { + // Restore full ZA + Chain = emitSMEStateSaveRestore(*this, DAG, &FuncInfo, DL, Chain, + /*IsSave=*/false); + } else if (SMEFnAttrs.hasZAState() || SMEFnAttrs.hasZT0State()) { + // SMSTART ZA + Chain = DAG.getNode( + AArch64ISD::SMSTART, DL, DAG.getVTList(MVT::Other, MVT::Glue), Chain, + DAG.getTargetConstant(int32_t(AArch64SVCR::SVCRZA), DL, MVT::i32)); + + // Restore ZT0 + if (SMEFnAttrs.hasZT0State()) { + SDValue ZT0FrameIndex = + getZT0FrameIndex(MF.getFrameInfo(), FuncInfo, DAG); + Chain = + DAG.getNode(AArch64ISD::RESTORE_ZT, DL, DAG.getVTList(MVT::Other), + {Chain, DAG.getConstant(0, DL, MVT::i32), ZT0FrameIndex}); + } + + // Restore ZA + if (SMEFnAttrs.hasZAState()) + Chain = emitRestoreZALazySave(Chain, DL, *this, TRI, FuncInfo, DAG); + } return Chain; } @@ -9232,30 +9324,6 @@ SDValue AArch64TargetLowering::changeStreamingMode( return GetCheckVL(SMChange.getValue(0), SMChange.getValue(1)); } -// Emit a call to __arm_sme_save or __arm_sme_restore. 
-static SDValue emitSMEStateSaveRestore(const AArch64TargetLowering &TLI, - SelectionDAG &DAG, - AArch64FunctionInfo *Info, SDLoc DL, - SDValue Chain, bool IsSave) { - MachineFunction &MF = DAG.getMachineFunction(); - AArch64FunctionInfo *FuncInfo = MF.getInfo(); - FuncInfo->setSMESaveBufferUsed(); - TargetLowering::ArgListTy Args; - Args.emplace_back( - DAG.getCopyFromReg(Chain, DL, Info->getSMESaveBufferAddr(), MVT::i64), - PointerType::getUnqual(*DAG.getContext())); - - RTLIB::Libcall LC = - IsSave ? RTLIB::SMEABI_SME_SAVE : RTLIB::SMEABI_SME_RESTORE; - SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), - TLI.getPointerTy(DAG.getDataLayout())); - auto *RetTy = Type::getVoidTy(*DAG.getContext()); - TargetLowering::CallLoweringInfo CLI(DAG); - CLI.setDebugLoc(DL).setChain(Chain).setLibCallee( - TLI.getLibcallCallingConv(LC), RetTy, Callee, std::move(Args)); - return TLI.LowerCallTo(CLI).second; -} - static AArch64SME::ToggleCondition getSMToggleCondition(const SMECallAttrs &CallAttrs) { if (!CallAttrs.caller().hasStreamingCompatibleInterface() || @@ -10015,33 +10083,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI, {Result, DAG.getConstant(0, DL, MVT::i32), ZTFrameIdx}); if (RequiresLazySave) { - // Conditionally restore the lazy save using a pseudo node. - RTLIB::Libcall LC = RTLIB::SMEABI_TPIDR2_RESTORE; - TPIDR2Object &TPIDR2 = FuncInfo->getTPIDR2Obj(); - SDValue RegMask = DAG.getRegisterMask( - TRI->getCallPreservedMask(MF, getLibcallCallingConv(LC))); - SDValue RestoreRoutine = DAG.getTargetExternalSymbol( - getLibcallName(LC), getPointerTy(DAG.getDataLayout())); - SDValue TPIDR2_EL0 = DAG.getNode( - ISD::INTRINSIC_W_CHAIN, DL, MVT::i64, Result, - DAG.getConstant(Intrinsic::aarch64_sme_get_tpidr2, DL, MVT::i32)); - // Copy the address of the TPIDR2 block into X0 before 'calling' the - // RESTORE_ZA pseudo. 
- SDValue Glue; - SDValue TPIDR2Block = DAG.getFrameIndex( - TPIDR2.FrameIndex, - DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout())); - Result = DAG.getCopyToReg(Result, DL, AArch64::X0, TPIDR2Block, Glue); - Result = - DAG.getNode(AArch64ISD::RESTORE_ZA, DL, MVT::Other, - {Result, TPIDR2_EL0, DAG.getRegister(AArch64::X0, MVT::i64), - RestoreRoutine, RegMask, Result.getValue(1)}); - // Finally reset the TPIDR2_EL0 register to 0. - Result = DAG.getNode( - ISD::INTRINSIC_VOID, DL, MVT::Other, Result, - DAG.getConstant(Intrinsic::aarch64_sme_set_tpidr2, DL, MVT::i32), - DAG.getConstant(0, DL, MVT::i64)); - TPIDR2.Uses++; + Result = emitRestoreZALazySave(Result, DL, *this, *TRI, *FuncInfo, DAG); } else if (RequiresSaveAllZA) { Result = emitSMEStateSaveRestore(*this, DAG, FuncInfo, DL, Result, /*IsSave=*/false); diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 04b3c90c2e177..f788c7510f80c 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -9907,8 +9907,14 @@ def : Pat<(v4bf16 (bitconvert (v2f32 FPR64:$src))), def : Pat<(v4bf16 (bitconvert (v1f64 FPR64:$src))), (v4bf16 (REV64v4i16 FPR64:$src))>; } -def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>; -def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), (v4bf16 FPR64:$src)>; +def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), + (v4f16 FPR64:$src)>; +def : Pat<(v4f16 (bitconvert (v4bf16 FPR64:$src))), + (v4f16 FPR64:$src)>; +def : Pat<(v4bf16 (bitconvert (v4i16 FPR64:$src))), + (v4bf16 FPR64:$src)>; +def : Pat<(v4bf16 (bitconvert (v4f16 FPR64:$src))), + (v4bf16 FPR64:$src)>; let Predicates = [IsLE] in { def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>; @@ -10236,8 +10242,14 @@ def : Pat<(v8bf16 (bitconvert (v2f64 FPR128:$src))), def : Pat<(v8bf16 (bitconvert (v4f32 FPR128:$src))), (v8bf16 (REV32v8i16 FPR128:$src))>; } -def : Pat<(v8f16 (bitconvert 
(v8i16 FPR128:$src))), (v8f16 FPR128:$src)>; -def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), (v8bf16 FPR128:$src)>; +def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), + (v8f16 FPR128:$src)>; +def : Pat<(v8bf16 (bitconvert (v8i16 FPR128:$src))), + (v8bf16 FPR128:$src)>; +def : Pat<(v8f16 (bitconvert (v8bf16 FPR128:$src))), + (v8f16 FPR128:$src)>; +def : Pat<(v8bf16 (bitconvert (v8f16 FPR128:$src))), + (v8bf16 FPR128:$src)>; let Predicates = [IsLE] in { def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>; diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp index ea2196a584127..7ee54c5932b15 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp @@ -444,7 +444,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) // Regardless of FP16 support, widen 16-bit elements to 32-bits. .minScalar(0, s32) .libcallFor({s32, s64, s128}); - getActionDefinitionsBuilder(G_FPOWI) + getActionDefinitionsBuilder({G_FPOWI, G_FLDEXP}) .scalarize(0) .minScalar(0, s32) .libcallFor({{s32, s32}, {s64, s32}, {s128, s32}}); diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp index dfbde85231a6e..a67a7bedf19a3 100644 --- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -1807,7 +1807,8 @@ class AMDGPUAsmParser : public MCTargetAsmParser { const OperandVector &Operands) const; SMLoc getInstLoc(const OperandVector &Operands) const; - bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc, const OperandVector &Operands); + bool validateInstruction(const MCInst &Inst, SMLoc IDLoc, + const OperandVector &Operands); bool validateOffset(const MCInst &Inst, const OperandVector &Operands); bool validateFlatOffset(const MCInst &Inst, const OperandVector &Operands); bool 
validateSMEMOffset(const MCInst &Inst, const OperandVector &Operands); @@ -1824,8 +1825,8 @@ class AMDGPUAsmParser : public MCTargetAsmParser { bool validateMIMGAtomicDMask(const MCInst &Inst); bool validateMIMGGatherDMask(const MCInst &Inst); bool validateMovrels(const MCInst &Inst, const OperandVector &Operands); - bool validateMIMGDataSize(const MCInst &Inst, const SMLoc &IDLoc); - bool validateMIMGAddrSize(const MCInst &Inst, const SMLoc &IDLoc); + bool validateMIMGDataSize(const MCInst &Inst, SMLoc IDLoc); + bool validateMIMGAddrSize(const MCInst &Inst, SMLoc IDLoc); bool validateMIMGD16(const MCInst &Inst); bool validateMIMGDim(const MCInst &Inst, const OperandVector &Operands); bool validateTensorR128(const MCInst &Inst); @@ -1847,7 +1848,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser { bool validateDivScale(const MCInst &Inst); bool validateWaitCnt(const MCInst &Inst, const OperandVector &Operands); bool validateCoherencyBits(const MCInst &Inst, const OperandVector &Operands, - const SMLoc &IDLoc); + SMLoc IDLoc); bool validateTHAndScopeBits(const MCInst &Inst, const OperandVector &Operands, const unsigned CPol); bool validateTFE(const MCInst &Inst, const OperandVector &Operands); @@ -1864,7 +1865,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser { bool isSupportedMnemo(StringRef Mnemo, const FeatureBitset &FBS, ArrayRef Variants); - bool checkUnsupportedInstruction(StringRef Name, const SMLoc &IDLoc); + bool checkUnsupportedInstruction(StringRef Name, SMLoc IDLoc); bool isId(const StringRef Id) const; bool isId(const AsmToken &Token, const StringRef Id) const; @@ -4087,8 +4088,7 @@ bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) { constexpr uint64_t MIMGFlags = SIInstrFlags::MIMG | SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE; -bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst, - const SMLoc &IDLoc) { +bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst, SMLoc IDLoc) { const unsigned Opc = 
Inst.getOpcode(); const MCInstrDesc &Desc = MII.get(Opc); @@ -4135,8 +4135,7 @@ bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst, return false; } -bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst, - const SMLoc &IDLoc) { +bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst, SMLoc IDLoc) { const unsigned Opc = Inst.getOpcode(); const MCInstrDesc &Desc = MII.get(Opc); @@ -5344,7 +5343,7 @@ bool AMDGPUAsmParser::validateGWS(const MCInst &Inst, bool AMDGPUAsmParser::validateCoherencyBits(const MCInst &Inst, const OperandVector &Operands, - const SMLoc &IDLoc) { + SMLoc IDLoc) { int CPolPos = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::cpol); if (CPolPos == -1) @@ -5541,8 +5540,7 @@ bool AMDGPUAsmParser::validateWMMA(const MCInst &Inst, validateFmt(AMDGPU::OpName::matrix_b_fmt, AMDGPU::OpName::src1); } -bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst, - const SMLoc &IDLoc, +bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst, SMLoc IDLoc, const OperandVector &Operands) { if (!validateLdsDirect(Inst, Operands)) return false; @@ -5704,7 +5702,7 @@ bool AMDGPUAsmParser::isSupportedMnemo(StringRef Mnemo, } bool AMDGPUAsmParser::checkUnsupportedInstruction(StringRef Mnemo, - const SMLoc &IDLoc) { + SMLoc IDLoc) { FeatureBitset FBS = ComputeAvailableFeatures(getFeatureBits()); // Check if requested instruction variant is supported. diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp index 51c56ecea2c96..fed37788802b9 100644 --- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -1313,6 +1313,15 @@ void SIFoldOperandsImpl::foldOperand( if (MovSrcRC) { if (UseSubReg) MovSrcRC = TRI->getMatchingSuperRegClass(SrcRC, MovSrcRC, UseSubReg); + + // FIXME: We should be able to directly check immediate operand legality + // for all cases, but gfx908 hacks break. 
+ if (MovOp == AMDGPU::AV_MOV_B32_IMM_PSEUDO && + (!OpToFold.isImm() || + !TII->isImmOperandLegal(MovDesc, SrcIdx, + *OpToFold.getEffectiveImmVal()))) + break; + if (!MRI->constrainRegClass(SrcReg, MovSrcRC)) break; diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp index 7ec98851d0bef..f291191dbfd5c 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp @@ -2730,7 +2730,7 @@ bool SIInsertWaitcnts::shouldFlushVmCnt(MachineLoop *ML, HasVMemStore = true; } for (const MachineOperand &Op : MI.all_uses()) { - if (!TRI->isVectorRegister(*MRI, Op.getReg())) + if (Op.isDebug() || !TRI->isVectorRegister(*MRI, Op.getReg())) continue; RegInterval Interval = Brackets.getRegInterval(&MI, MRI, TRI, Op); // Vgpr use diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td index d4c1bc6d84384..59fd2f10ccacd 100644 --- a/llvm/lib/Target/AMDGPU/SIInstructions.td +++ b/llvm/lib/Target/AMDGPU/SIInstructions.td @@ -2287,8 +2287,9 @@ def : GCNPat < def : GCNPat < (fcopysign fp16vt:$src0, f32:$src1), - (EXTRACT_SUBREG (V_BFI_B32_e64 (S_MOV_B32 (i32 0x7fff0000)), - (REG_SEQUENCE VGPR_32, (i16 (IMPLICIT_DEF)), lo16, $src0, hi16), $src1), hi16) + (EXTRACT_SUBREG (V_BFI_B32_e64 (S_MOV_B32 (i32 0x00007fff)), + (REG_SEQUENCE VGPR_32, $src0, lo16, (i16 (IMPLICIT_DEF)), hi16), + (V_LSHRREV_B32_e64 (i32 16), $src1)), lo16) >; def : GCNPat < @@ -2660,11 +2661,11 @@ let True16Predicate = NotHasTrue16BitInsts in { let SubtargetPredicate = isNotGFX9Plus in { def : ROTRPattern ; -def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))), +def : GCNPat<(i32 (DivergentUnaryFrag (srl i64:$src0, (and i32:$src1, (i32 31))))), (V_ALIGNBIT_B32_e64 (i32 (EXTRACT_SUBREG (i64 $src0), sub1)), (i32 (EXTRACT_SUBREG (i64 $src0), sub0)), $src1)>; -def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), +def : GCNPat<(i32 (DivergentUnaryFrag (srl 
i64:$src0, (i32 ShiftAmt32Imm:$src1)))), (V_ALIGNBIT_B32_e64 (i32 (EXTRACT_SUBREG (i64 $src0), sub1)), (i32 (EXTRACT_SUBREG (i64 $src0), sub0)), $src1)>; } // isNotGFX9Plus @@ -2678,8 +2679,8 @@ def : GCNPat < $src1, /* clamp */ 0, /* op_sel */ 0) >; -foreach pat = [(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))), - (i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1))))] in +foreach pat = [(i32 (DivergentUnaryFrag (srl i64:$src0, (and i32:$src1, (i32 31))))), + (i32 (DivergentUnaryFrag (srl i64:$src0, (i32 ShiftAmt32Imm:$src1))))] in def : GCNPat; -def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), +def : GCNPat<(i32 (DivergentUnaryFrag (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), (V_ALIGNBIT_B32_t16_e64 0, /* src0_modifiers */ (i32 (EXTRACT_SUBREG (i64 $src0), sub1)), 0, /* src1_modifiers */ @@ -2734,7 +2735,7 @@ def : GCNPat < $src1, /* clamp */ 0, /* op_sel */ 0) >; -def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))), +def : GCNPat<(i32 (DivergentUnaryFrag (srl i64:$src0, (and i32:$src1, (i32 31))))), (V_ALIGNBIT_B32_fake16_e64 0, /* src0_modifiers */ (i32 (EXTRACT_SUBREG (i64 $src0), sub1)), 0, /* src1_modifiers */ @@ -2743,7 +2744,7 @@ def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))), $src1, /* clamp */ 0, /* op_sel */ 0) >; -def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), +def : GCNPat<(i32 (DivergentUnaryFrag (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))), (V_ALIGNBIT_B32_fake16_e64 0, /* src0_modifiers */ (i32 (EXTRACT_SUBREG (i64 $src0), sub1)), 0, /* src1_modifiers */ diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp index 1c42f44765abf..1f773e2a7e0fc 100644 --- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp @@ -610,25 +610,41 @@ void ARMAsmPrinter::emitEndOfAsmFile(Module &M) { // to appear in the .ARM.attributes section in ELF. 
// Instead of subclassing the MCELFStreamer, we do the work here. - // Returns true if all functions have the same function attribute value. - // It also returns true when the module has no functions. +// Returns true if all function definitions have the same function attribute +// value. It also returns true when the module has no functions. static bool checkFunctionsAttributeConsistency(const Module &M, StringRef Attr, StringRef Value) { - return !any_of(M, [&](const Function &F) { - return F.getFnAttribute(Attr).getValueAsString() != Value; - }); + return !any_of(M, [&](const Function &F) { + if (F.isDeclaration()) + return false; + return F.getFnAttribute(Attr).getValueAsString() != Value; + }); } -// Returns true if all functions have the same denormal mode. +// Returns true if all functions definitions have the same denormal mode. // It also returns true when the module has no functions. -static bool checkDenormalAttributeConsistency(const Module &M, - StringRef Attr, +static bool checkDenormalAttributeConsistency(const Module &M, StringRef Attr, DenormalMode Value) { return !any_of(M, [&](const Function &F) { + if (F.isDeclaration()) + return false; StringRef AttrVal = F.getFnAttribute(Attr).getValueAsString(); return parseDenormalFPAttribute(AttrVal) != Value; }); } +// Returns true if all functions have different denormal modes. 
+static bool checkDenormalAttributeInconsistency(const Module &M) { + auto F = M.functions().begin(); + auto E = M.functions().end(); + if (F == E) + return false; + DenormalMode Value = F->getDenormalModeRaw(); + ++F; + return std::any_of(F, E, [&](const Function &F) { + return !F.isDeclaration() && F.getDenormalModeRaw() != Value; + }); +} + void ARMAsmPrinter::emitAttributes() { MCTargetStreamer &TS = *OutStreamer->getTargetStreamer(); ARMTargetStreamer &ATS = static_cast(TS); @@ -695,7 +711,9 @@ void ARMAsmPrinter::emitAttributes() { DenormalMode::getPositiveZero())) ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal, ARMBuildAttrs::PositiveZero); - else if (!TM.Options.UnsafeFPMath) + else if (checkDenormalAttributeInconsistency(*MMI->getModule()) || + checkDenormalAttributeConsistency( + *MMI->getModule(), "denormal-fp-math", DenormalMode::getIEEE())) ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal, ARMBuildAttrs::IEEEDenormals); else { @@ -730,7 +748,7 @@ void ARMAsmPrinter::emitAttributes() { TM.Options.NoTrappingFPMath) ATS.emitAttribute(ARMBuildAttrs::ABI_FP_exceptions, ARMBuildAttrs::Not_Allowed); - else if (!TM.Options.UnsafeFPMath) { + else { ATS.emitAttribute(ARMBuildAttrs::ABI_FP_exceptions, ARMBuildAttrs::Allowed); // If the user has permitted this code to choose the IEEE 754 diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 9a247bb5a83d9..f4ac6bb76b3fe 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -5573,7 +5573,7 @@ static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, llvm_unreachable("Unknown VFP cmp argument!"); } -/// OptimizeVFPBrcond - With nnan, it's legal to optimize some +/// OptimizeVFPBrcond - With nnan and without daz, it's legal to optimize some /// f32 and even f64 comparisons to integer ones. 
SDValue ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { @@ -5729,9 +5729,9 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { } SDNodeFlags Flags = Op->getFlags(); - if ((getTargetMachine().Options.UnsafeFPMath || Flags.hasNoNaNs()) && - (DAG.getDenormalMode(MVT::f32) == DenormalMode::getIEEE() && - DAG.getDenormalMode(MVT::f64) == DenormalMode::getIEEE()) && + if (Flags.hasNoNaNs() && + DAG.getDenormalMode(MVT::f32) == DenormalMode::getIEEE() && + DAG.getDenormalMode(MVT::f64) == DenormalMode::getIEEE() && (CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETNE || CC == ISD::SETUNE)) { if (SDValue Result = OptimizeVFPBrcond(Op, DAG)) @@ -20428,9 +20428,9 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, if (CVal >= -255 && CVal <= -1) break; } else { - // This must be a constant between -4095 and 4095. It is not clear - // what this constraint is intended for. Implemented for - // compatibility with GCC. + // This must be a constant between -4095 and 4095. This is suitable + // for use as the immediate offset field in LDR and STR instructions + // such as LDR r0,[r1,#offset]. if (CVal >= -4095 && CVal <= 4095) break; } diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp index f0592945b2051..3329beab63ddf 100644 --- a/llvm/lib/Target/ARM/ARMSubtarget.cpp +++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp @@ -222,8 +222,7 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) { // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default. 
const FeatureBitset &Bits = getFeatureBits(); if ((Bits[ARM::ProcA5] || Bits[ARM::ProcA8]) && // Where this matters - (Options.UnsafeFPMath || isTargetDarwin() || - DM == DenormalMode::getPreserveSign())) + (isTargetDarwin() || DM == DenormalMode::getPreserveSign())) HasNEONForFP = true; if (isRWPI()) diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index 0e974838a7c6b..f60660b12baca 100644 --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -135,17 +135,17 @@ class UnwindContext { MCRegister getFPReg() const { return FPReg; } void emitFnStartLocNotes() const { - for (const SMLoc &Loc : FnStartLocs) + for (SMLoc Loc : FnStartLocs) Parser.Note(Loc, ".fnstart was specified here"); } void emitCantUnwindLocNotes() const { - for (const SMLoc &Loc : CantUnwindLocs) + for (SMLoc Loc : CantUnwindLocs) Parser.Note(Loc, ".cantunwind was specified here"); } void emitHandlerDataLocNotes() const { - for (const SMLoc &Loc : HandlerDataLocs) + for (SMLoc Loc : HandlerDataLocs) Parser.Note(Loc, ".handlerdata was specified here"); } diff --git a/llvm/lib/Target/DirectX/DXILPostOptimizationValidation.cpp b/llvm/lib/Target/DirectX/DXILPostOptimizationValidation.cpp index 7e93474e73118..6e95a4232fabe 100644 --- a/llvm/lib/Target/DirectX/DXILPostOptimizationValidation.cpp +++ b/llvm/lib/Target/DirectX/DXILPostOptimizationValidation.cpp @@ -160,6 +160,41 @@ tripleToVisibility(llvm::Triple::EnvironmentType ET) { } } +static void reportIfDeniedShaderStageAccess(Module &M, + const dxbc::RootFlags &Flags, + const dxbc::RootFlags &Mask) { + if ((Flags & Mask) != Mask) + return; + + SmallString<128> Message; + raw_svector_ostream OS(Message); + OS << "Shader has root bindings but root signature uses a DENY flag to " + "disallow root binding access to the shader stage."; + M.getContext().diagnose(DiagnosticInfoGeneric(Message)); +} + +static std::optional 
+getEnvironmentDenyFlagMask(Triple::EnvironmentType ShaderProfile) { + switch (ShaderProfile) { + case Triple::Pixel: + return dxbc::RootFlags::DenyPixelShaderRootAccess; + case Triple::Vertex: + return dxbc::RootFlags::DenyVertexShaderRootAccess; + case Triple::Geometry: + return dxbc::RootFlags::DenyGeometryShaderRootAccess; + case Triple::Hull: + return dxbc::RootFlags::DenyHullShaderRootAccess; + case Triple::Domain: + return dxbc::RootFlags::DenyDomainShaderRootAccess; + case Triple::Mesh: + return dxbc::RootFlags::DenyMeshShaderRootAccess; + case Triple::Amplification: + return dxbc::RootFlags::DenyAmplificationShaderRootAccess; + default: + return std::nullopt; + } +} + static void validateRootSignature(Module &M, const mcdxbc::RootSignatureDesc &RSD, dxil::ModuleMetadataInfo &MMI, @@ -225,7 +260,9 @@ static void validateRootSignature(Module &M, Builder.findOverlapping(ReportedBinding); reportOverlappingRegisters(M, ReportedBinding, Overlaping); }); + const hlsl::BoundRegs &BoundRegs = Builder.takeBoundRegs(); + bool HasBindings = false; for (const ResourceInfo &RI : DRM) { const ResourceInfo::ResourceBinding &Binding = RI.getBinding(); const dxil::ResourceTypeInfo &RTI = DRTM[RI.getHandleTy()]; @@ -236,22 +273,33 @@ static void validateRootSignature(Module &M, BoundRegs.findBoundReg(RC, Binding.Space, Binding.LowerBound, Binding.LowerBound + Binding.Size - 1); - if (Reg != nullptr) { - const auto *ParamInfo = - static_cast(Reg->Cookie); - - if (RC != ResourceClass::SRV && RC != ResourceClass::UAV) - continue; + if (!Reg) { + reportRegNotBound(M, RC, Binding); + continue; + } - if (ParamInfo->Type == dxbc::RootParameterType::DescriptorTable) - continue; + const auto *ParamInfo = + static_cast(Reg->Cookie); - if (RK != ResourceKind::RawBuffer && RK != ResourceKind::StructuredBuffer) - reportInvalidHandleTyError(M, RC, Binding); - } else { - reportRegNotBound(M, RC, Binding); + bool IsSRVOrUAV = RC == ResourceClass::SRV || RC == ResourceClass::UAV; + bool 
IsDescriptorTable = + ParamInfo->Type == dxbc::RootParameterType::DescriptorTable; + bool IsRawOrStructuredBuffer = + RK != ResourceKind::RawBuffer && RK != ResourceKind::StructuredBuffer; + if (IsSRVOrUAV && !IsDescriptorTable && IsRawOrStructuredBuffer) { + reportInvalidHandleTyError(M, RC, Binding); + continue; } + + HasBindings = true; } + + if (!HasBindings) + return; + + if (std::optional Mask = + getEnvironmentDenyFlagMask(MMI.ShaderProfile)) + reportIfDeniedShaderStageAccess(M, dxbc::RootFlags(RSD.Flags), *Mask); } static mcdxbc::RootSignatureDesc * diff --git a/llvm/lib/Target/Hexagon/RDFCopy.cpp b/llvm/lib/Target/Hexagon/RDFCopy.cpp index fafdad08909dd..3b1d3bd89680b 100644 --- a/llvm/lib/Target/Hexagon/RDFCopy.cpp +++ b/llvm/lib/Target/Hexagon/RDFCopy.cpp @@ -108,7 +108,7 @@ bool CopyPropagation::scanBlock(MachineBasicBlock *B) { for (NodeAddr IA : BA.Addr->members(DFG)) { if (DFG.IsCode(IA)) { NodeAddr SA = IA; - EqualityMap EM(std::less(DFG.getPRI())); + EqualityMap EM(RegisterRefLess(DFG.getPRI())); if (interpretAsCopy(SA.Addr->getCode(), EM)) recordCopy(SA, EM); } diff --git a/llvm/lib/Target/Hexagon/RDFCopy.h b/llvm/lib/Target/Hexagon/RDFCopy.h index e4fb89892831d..92b2c65982655 100644 --- a/llvm/lib/Target/Hexagon/RDFCopy.h +++ b/llvm/lib/Target/Hexagon/RDFCopy.h @@ -25,8 +25,8 @@ class MachineInstr; namespace rdf { struct CopyPropagation { - CopyPropagation(DataFlowGraph &dfg) : MDT(dfg.getDT()), DFG(dfg), - RDefMap(std::less(DFG.getPRI())) {} + CopyPropagation(DataFlowGraph &dfg) + : MDT(dfg.getDT()), DFG(dfg), RDefMap(RegisterRefLess(DFG.getPRI())) {} virtual ~CopyPropagation() = default; @@ -35,7 +35,7 @@ namespace rdf { bool trace() const { return Trace; } DataFlowGraph &getDFG() { return DFG; } - using EqualityMap = std::map; + using EqualityMap = std::map; virtual bool interpretAsCopy(const MachineInstr *MI, EqualityMap &EM); private: @@ -45,7 +45,7 @@ namespace rdf { bool Trace = false; // map: register -> (map: stmt -> reaching def) - 
std::map> RDefMap; + std::map, RegisterRefLess> RDefMap; // map: statement -> (map: dst reg -> src reg) std::map CopyMap; std::vector Copies; diff --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp index 07e722b9a6591..442f0a46a4983 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp @@ -113,10 +113,11 @@ void LoongArchDAGToDAGISel::Select(SDNode *Node) { APInt SplatValue, SplatUndef; unsigned SplatBitSize; bool HasAnyUndefs; - unsigned Op; + unsigned Op = 0; EVT ResTy = BVN->getValueType(0); bool Is128Vec = BVN->getValueType(0).is128BitVector(); bool Is256Vec = BVN->getValueType(0).is256BitVector(); + SDNode *Res; if (!Subtarget->hasExtLSX() || (!Is128Vec && !Is256Vec)) break; @@ -124,26 +125,25 @@ void LoongArchDAGToDAGISel::Select(SDNode *Node) { HasAnyUndefs, 8)) break; - switch (SplatBitSize) { - default: - break; - case 8: - Op = Is256Vec ? LoongArch::PseudoXVREPLI_B : LoongArch::PseudoVREPLI_B; - break; - case 16: - Op = Is256Vec ? LoongArch::PseudoXVREPLI_H : LoongArch::PseudoVREPLI_H; - break; - case 32: - Op = Is256Vec ? LoongArch::PseudoXVREPLI_W : LoongArch::PseudoVREPLI_W; - break; - case 64: - Op = Is256Vec ? LoongArch::PseudoXVREPLI_D : LoongArch::PseudoVREPLI_D; - break; - } - - SDNode *Res; // If we have a signed 10 bit integer, we can splat it directly. if (SplatValue.isSignedIntN(10)) { + switch (SplatBitSize) { + default: + break; + case 8: + Op = Is256Vec ? LoongArch::PseudoXVREPLI_B : LoongArch::PseudoVREPLI_B; + break; + case 16: + Op = Is256Vec ? LoongArch::PseudoXVREPLI_H : LoongArch::PseudoVREPLI_H; + break; + case 32: + Op = Is256Vec ? LoongArch::PseudoXVREPLI_W : LoongArch::PseudoVREPLI_W; + break; + case 64: + Op = Is256Vec ? 
LoongArch::PseudoXVREPLI_D : LoongArch::PseudoVREPLI_D; + break; + } + EVT EleType = ResTy.getVectorElementType(); APInt Val = SplatValue.sextOrTrunc(EleType.getSizeInBits()); SDValue Imm = CurDAG->getTargetConstant(Val, DL, EleType); @@ -151,6 +151,21 @@ void LoongArchDAGToDAGISel::Select(SDNode *Node) { ReplaceNode(Node, Res); return; } + + // Select appropriate [x]vldi instructions for some special constant splats, + // where the immediate value `imm[12] == 1` for used [x]vldi instructions. + const auto &TLI = + *static_cast(getTargetLowering()); + std::pair ConvertVLDI = + TLI.isImmVLDILegalForMode1(SplatValue, SplatBitSize); + if (ConvertVLDI.first) { + Op = Is256Vec ? LoongArch::XVLDI : LoongArch::VLDI; + SDValue Imm = CurDAG->getSignedTargetConstant( + SignExtend32<13>(ConvertVLDI.second), DL, MVT::i32); + Res = CurDAG->getMachineNode(Op, DL, ResTy, Imm); + ReplaceNode(Node, Res); + return; + } break; } } diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index 5d4a8fd080202..4cfbfca45d359 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -340,6 +340,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM, {ISD::SETNE, ISD::SETGE, ISD::SETGT, ISD::SETUGE, ISD::SETUGT}, VT, Expand); setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); + setOperationAction(ISD::ABS, VT, Legal); setOperationAction(ISD::ABDS, VT, Legal); setOperationAction(ISD::ABDU, VT, Legal); setOperationAction(ISD::SADDSAT, VT, Legal); @@ -419,6 +420,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM, {ISD::SETNE, ISD::SETGE, ISD::SETGT, ISD::SETUGE, ISD::SETUGT}, VT, Expand); setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); + setOperationAction(ISD::ABS, VT, Legal); setOperationAction(ISD::ABDS, VT, Legal); setOperationAction(ISD::ABDU, VT, Legal); setOperationAction(ISD::SADDSAT, VT, Legal); 
@@ -2317,6 +2319,53 @@ static SDValue lowerVECTOR_SHUFFLE_XVPICKOD(const SDLoc &DL, ArrayRef Mask, return DAG.getNode(LoongArchISD::VPICKOD, DL, VT, V2, V1); } +/// Lower VECTOR_SHUFFLE into XVINSVE0 (if possible). +static SDValue +lowerVECTOR_SHUFFLE_XVINSVE0(const SDLoc &DL, ArrayRef Mask, MVT VT, + SDValue V1, SDValue V2, SelectionDAG &DAG, + const LoongArchSubtarget &Subtarget) { + // LoongArch LASX only supports xvinsve0.{w/d}. + if (VT != MVT::v8i32 && VT != MVT::v8f32 && VT != MVT::v4i64 && + VT != MVT::v4f64) + return SDValue(); + + MVT GRLenVT = Subtarget.getGRLenVT(); + int MaskSize = Mask.size(); + assert(MaskSize == (int)VT.getVectorNumElements() && "Unexpected mask size"); + + // Check if exactly one element of the Mask is replaced by 'Replaced', while + // all other elements are either 'Base + i' or undef (-1). On success, return + // the index of the replaced element. Otherwise, just return -1. + auto checkReplaceOne = [&](int Base, int Replaced) -> int { + int Idx = -1; + for (int i = 0; i < MaskSize; ++i) { + if (Mask[i] == Base + i || Mask[i] == -1) + continue; + if (Mask[i] != Replaced) + return -1; + if (Idx == -1) + Idx = i; + else + return -1; + } + return Idx; + }; + + // Case 1: the lowest element of V2 replaces one element in V1. + int Idx = checkReplaceOne(0, MaskSize); + if (Idx != -1) + return DAG.getNode(LoongArchISD::XVINSVE0, DL, VT, V1, V2, + DAG.getConstant(Idx, DL, GRLenVT)); + + // Case 2: the lowest element of V1 replaces one element in V2. + Idx = checkReplaceOne(MaskSize, 0); + if (Idx != -1) + return DAG.getNode(LoongArchISD::XVINSVE0, DL, VT, V2, V1, + DAG.getConstant(Idx, DL, GRLenVT)); + + return SDValue(); +} + /// Lower VECTOR_SHUFFLE into XVSHUF (if possible). 
static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef Mask, MVT VT, SDValue V1, SDValue V2, @@ -2593,6 +2642,9 @@ static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef Mask, MVT VT, if ((Result = lowerVECTOR_SHUFFLEAsShift(DL, Mask, VT, V1, V2, DAG, Subtarget, Zeroable))) return Result; + if ((Result = + lowerVECTOR_SHUFFLE_XVINSVE0(DL, Mask, VT, V1, V2, DAG, Subtarget))) + return Result; if ((Result = lowerVECTOR_SHUFFLEAsByteRotate(DL, Mask, VT, V1, V2, DAG, Subtarget))) return Result; @@ -2851,9 +2903,10 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op, if (SplatBitSize == 64 && !Subtarget.is64Bit()) { // We can only handle 64-bit elements that are within - // the signed 10-bit range on 32-bit targets. + // the signed 10-bit range or match vldi patterns on 32-bit targets. // See the BUILD_VECTOR case in LoongArchDAGToDAGISel::Select(). - if (!SplatValue.isSignedIntN(10)) + if (!SplatValue.isSignedIntN(10) && + !isImmVLDILegalForMode1(SplatValue, SplatBitSize).first) return SDValue(); if ((Is128Vec && ResTy == MVT::v4i32) || (Is256Vec && ResTy == MVT::v8i32)) @@ -7450,6 +7503,7 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const { NODE_NAME_CASE(XVPERM) NODE_NAME_CASE(XVREPLVE0) NODE_NAME_CASE(XVREPLVE0Q) + NODE_NAME_CASE(XVINSVE0) NODE_NAME_CASE(VPICK_SEXT_ELT) NODE_NAME_CASE(VPICK_ZEXT_ELT) NODE_NAME_CASE(VREPLVE) @@ -8543,6 +8597,87 @@ SDValue LoongArchTargetLowering::LowerReturn( return DAG.getNode(LoongArchISD::RET, DL, MVT::Other, RetOps); } +// Check if a constant splat can be generated using [x]vldi, where imm[12] == 1. 
+// Note: The following prefixes are excluded: +// imm[11:8] == 4'b0000, 4'b0100, 4'b1000 +// as they can be represented using [x]vrepli.[whb] +std::pair LoongArchTargetLowering::isImmVLDILegalForMode1( + const APInt &SplatValue, const unsigned SplatBitSize) const { + uint64_t RequiredImm = 0; + uint64_t V = SplatValue.getZExtValue(); + if (SplatBitSize == 16 && !(V & 0x00FF)) { + // 4'b0101 + RequiredImm = (0b10101 << 8) | (V >> 8); + return {true, RequiredImm}; + } else if (SplatBitSize == 32) { + // 4'b0001 + if (!(V & 0xFFFF00FF)) { + RequiredImm = (0b10001 << 8) | (V >> 8); + return {true, RequiredImm}; + } + // 4'b0010 + if (!(V & 0xFF00FFFF)) { + RequiredImm = (0b10010 << 8) | (V >> 16); + return {true, RequiredImm}; + } + // 4'b0011 + if (!(V & 0x00FFFFFF)) { + RequiredImm = (0b10011 << 8) | (V >> 24); + return {true, RequiredImm}; + } + // 4'b0110 + if ((V & 0xFFFF00FF) == 0xFF) { + RequiredImm = (0b10110 << 8) | (V >> 8); + return {true, RequiredImm}; + } + // 4'b0111 + if ((V & 0xFF00FFFF) == 0xFFFF) { + RequiredImm = (0b10111 << 8) | (V >> 16); + return {true, RequiredImm}; + } + // 4'b1010 + if ((V & 0x7E07FFFF) == 0x3E000000 || (V & 0x7E07FFFF) == 0x40000000) { + RequiredImm = + (0b11010 << 8) | (((V >> 24) & 0xC0) ^ 0x40) | ((V >> 19) & 0x3F); + return {true, RequiredImm}; + } + } else if (SplatBitSize == 64) { + // 4'b1011 + if ((V & 0xFFFFFFFF7E07FFFFULL) == 0x3E000000ULL || + (V & 0xFFFFFFFF7E07FFFFULL) == 0x40000000ULL) { + RequiredImm = + (0b11011 << 8) | (((V >> 24) & 0xC0) ^ 0x40) | ((V >> 19) & 0x3F); + return {true, RequiredImm}; + } + // 4'b1100 + if ((V & 0x7FC0FFFFFFFFFFFFULL) == 0x4000000000000000ULL || + (V & 0x7FC0FFFFFFFFFFFFULL) == 0x3FC0000000000000ULL) { + RequiredImm = + (0b11100 << 8) | (((V >> 56) & 0xC0) ^ 0x40) | ((V >> 48) & 0x3F); + return {true, RequiredImm}; + } + // 4'b1001 + auto sameBitsPreByte = [](uint64_t x) -> std::pair { + uint8_t res = 0; + for (int i = 0; i < 8; ++i) { + uint8_t byte = x & 0xFF; + if (byte == 0 
|| byte == 0xFF) + res |= ((byte & 1) << i); + else + return {false, 0}; + x >>= 8; + } + return {true, res}; + }; + auto [IsSame, Suffix] = sameBitsPreByte(V); + if (IsSame) { + RequiredImm = (0b11001 << 8) | Suffix; + return {true, RequiredImm}; + } + } + return {false, RequiredImm}; +} + bool LoongArchTargetLowering::isFPImmVLDILegal(const APFloat &Imm, EVT VT) const { if (!Subtarget.hasExtLSX()) @@ -9475,3 +9610,20 @@ bool LoongArchTargetLowering::shouldScalarizeBinop(SDValue VecOp) const { EVT ScalarVT = VecVT.getScalarType(); return isOperationLegalOrCustomOrPromote(Opc, ScalarVT); } + +bool LoongArchTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, + unsigned Index) const { + if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) + return false; + + // Extract a 128-bit subvector from index 0 of a 256-bit vector is free. + return Index == 0; +} + +bool LoongArchTargetLowering::isExtractVecEltCheap(EVT VT, + unsigned Index) const { + EVT EltVT = VT.getScalarType(); + + // Extract a scalar FP value from index 0 of a vector is free. + return (EltVT == MVT::f32 || EltVT == MVT::f64) && Index == 0; +} diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h index b2fccf59169ff..8a4d7748467c7 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h @@ -151,6 +151,7 @@ enum NodeType : unsigned { XVPERM, XVREPLVE0, XVREPLVE0Q, + XVINSVE0, // Extended vector element extraction VPICK_SEXT_ELT, @@ -338,6 +339,15 @@ class LoongArchTargetLowering : public TargetLowering { unsigned Depth) const override; bool shouldScalarizeBinop(SDValue VecOp) const override; + bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, + unsigned Index) const override; + bool isExtractVecEltCheap(EVT VT, unsigned Index) const override; + + /// Check if a constant splat can be generated using [x]vldi, where imm[12] + /// is 1. 
+ std::pair + isImmVLDILegalForMode1(const APInt &SplatValue, + const unsigned SplatBitSize) const; private: /// Target-specific function used to lower LoongArch calling conventions. diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td index adfe990ba1234..5143d53bad719 100644 --- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td @@ -20,6 +20,7 @@ def loongarch_xvpermi: SDNode<"LoongArchISD::XVPERMI", SDT_LoongArchV1RUimm>; def loongarch_xvperm: SDNode<"LoongArchISD::XVPERM", SDT_LoongArchXVPERM>; def loongarch_xvreplve0: SDNode<"LoongArchISD::XVREPLVE0", SDT_LoongArchXVREPLVE0>; def loongarch_xvreplve0q: SDNode<"LoongArchISD::XVREPLVE0Q", SDT_LoongArchXVREPLVE0>; +def loongarch_xvinsve0 : SDNode<"LoongArchISD::XVINSVE0", SDT_LoongArchV2RUimm>; def loongarch_xvmskltz: SDNode<"LoongArchISD::XVMSKLTZ", SDT_LoongArchVMSKCOND>; def loongarch_xvmskgez: SDNode<"LoongArchISD::XVMSKGEZ", SDT_LoongArchVMSKCOND>; def loongarch_xvmskeqz: SDNode<"LoongArchISD::XVMSKEQZ", SDT_LoongArchVMSKCOND>; @@ -1708,6 +1709,14 @@ def : Pat<(vector_insert v4f64:$xd, (f64(bitconvert i64:$rj)), uimm2:$imm), (XVINSGR2VR_D v4f64:$xd, GPR:$rj, uimm2:$imm)>; // XVINSVE0_{W/D} +def : Pat<(loongarch_xvinsve0 v8i32:$xd, v8i32:$xj, uimm3:$imm), + (XVINSVE0_W v8i32:$xd, v8i32:$xj, uimm3:$imm)>; +def : Pat<(loongarch_xvinsve0 v4i64:$xd, v4i64:$xj, uimm2:$imm), + (XVINSVE0_D v4i64:$xd, v4i64:$xj, uimm2:$imm)>; +def : Pat<(loongarch_xvinsve0 v8f32:$xd, v8f32:$xj, uimm3:$imm), + (XVINSVE0_W v8f32:$xd, v8f32:$xj, uimm3:$imm)>; +def : Pat<(loongarch_xvinsve0 v4f64:$xd, v4f64:$xj, uimm2:$imm), + (XVINSVE0_D v4f64:$xd, v4f64:$xj, uimm2:$imm)>; def : Pat<(vector_insert v8f32:$xd, FPR32:$fj, uimm3:$imm), (XVINSVE0_W v8f32:$xd, (SUBREG_TO_REG(i64 0), FPR32:$fj, sub_32), uimm3:$imm)>; @@ -2015,10 +2024,26 @@ def : Pat<(v4i32(fp_to_uint v4f64:$vj)), (XVFTINTRZ_LU_D v4f64:$vj)), sub_128)>; 
+// abs +def : Pat<(abs v32i8:$xj), (XVMAX_B v32i8:$xj, (XVNEG_B v32i8:$xj))>; +def : Pat<(abs v16i16:$xj), (XVMAX_H v16i16:$xj, (XVNEG_H v16i16:$xj))>; +def : Pat<(abs v8i32:$xj), (XVMAX_W v8i32:$xj, (XVNEG_W v8i32:$xj))>; +def : Pat<(abs v4i64:$xj), (XVMAX_D v4i64:$xj, (XVNEG_D v4i64:$xj))>; + // XVABSD_{B/H/W/D}[U] defm : PatXrXr; defm : PatXrXrU; +// XVADDA_{B/H/W/D} +def : Pat<(add (v32i8 (abs v32i8:$xj)), (v32i8 (abs v32i8:$xk))), + (XVADDA_B v32i8:$xj, v32i8:$xk)>; +def : Pat<(add (v16i16 (abs v16i16:$xj)), (v16i16 (abs v16i16:$xk))), + (XVADDA_H v16i16:$xj, v16i16:$xk)>; +def : Pat<(add (v8i32 (abs v8i32:$xj)), (v8i32 (abs v8i32:$xk))), + (XVADDA_W v8i32:$xj, v8i32:$xk)>; +def : Pat<(add (v4i64 (abs v4i64:$xj)), (v4i64 (abs v4i64:$xk))), + (XVADDA_D v4i64:$xj, v4i64:$xk)>; + // XVSADD_{B/H/W/D}[U], XVSSUB_{B/H/W/D}[U] defm : PatXrXr; defm : PatXrXr; diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td index b0eb51a92c6c6..8d1dc99e316c9 100644 --- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td +++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td @@ -158,6 +158,7 @@ def vsplatf32_fpimm_eq_1 N = N->getOperand(0).getNode(); return selectVSplat(N, Imm, EltTy.getSizeInBits()) && + Imm.getBitWidth() == 32 && Imm.getBitWidth() == EltTy.getSizeInBits() && Imm == APFloat(+1.0f).bitcastToAPInt(); }]>; @@ -2153,10 +2154,26 @@ def : Pat<(f32 f32imm_vldi:$in), def : Pat<(f64 f64imm_vldi:$in), (f64 (EXTRACT_SUBREG (VLDI (to_f64imm_vldi f64imm_vldi:$in)), sub_64))>; +// abs +def : Pat<(abs v16i8:$vj), (VMAX_B v16i8:$vj, (VNEG_B v16i8:$vj))>; +def : Pat<(abs v8i16:$vj), (VMAX_H v8i16:$vj, (VNEG_H v8i16:$vj))>; +def : Pat<(abs v4i32:$vj), (VMAX_W v4i32:$vj, (VNEG_W v4i32:$vj))>; +def : Pat<(abs v2i64:$vj), (VMAX_D v2i64:$vj, (VNEG_D v2i64:$vj))>; + // VABSD_{B/H/W/D}[U] defm : PatVrVr; defm : PatVrVrU; +// VADDA_{B/H/W/D} +def : Pat<(add (v16i8 (abs v16i8:$vj)), (v16i8 (abs v16i8:$vk))), + (VADDA_B 
v16i8:$vj, v16i8:$vk)>; +def : Pat<(add (v8i16 (abs v8i16:$vj)), (v8i16 (abs v8i16:$vk))), + (VADDA_H v8i16:$vj, v8i16:$vk)>; +def : Pat<(add (v4i32 (abs v4i32:$vj)), (v4i32 (abs v4i32:$vk))), + (VADDA_W v4i32:$vj, v4i32:$vk)>; +def : Pat<(add (v2i64 (abs v2i64:$vj)), (v2i64 (abs v2i64:$vk))), + (VADDA_D v2i64:$vj, v2i64:$vk)>; + // VSADD_{B/H/W/D}[U], VSSUB_{B/H/W/D}[U] defm : PatVrVr; defm : PatVrVr; diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp index d0a8ababe8e58..c5e26c106b5df 100644 --- a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp @@ -57,6 +57,11 @@ static cl::opt cl::desc("Enable the loop data prefetch pass"), cl::init(false)); +static cl::opt + EnableMergeBaseOffset("loongarch-enable-merge-offset", + cl::desc("Enable the merge base offset pass"), + cl::init(true), cl::Hidden); + static Reloc::Model getEffectiveRelocModel(const Triple &TT, std::optional RM) { return RM.value_or(Reloc::Static); @@ -214,7 +219,7 @@ void LoongArchPassConfig::addMachineSSAOptimization() { void LoongArchPassConfig::addPreRegAlloc() { addPass(createLoongArchPreRAExpandPseudoPass()); - if (TM->getOptLevel() != CodeGenOptLevel::None) + if (TM->getOptLevel() != CodeGenOptLevel::None && EnableMergeBaseOffset) addPass(createLoongArchMergeBaseOffsetOptPass()); } diff --git a/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp b/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp index 3e9666f586e0f..e37f3a66fe11f 100644 --- a/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp +++ b/llvm/lib/Target/M68k/AsmParser/M68kAsmParser.cpp @@ -39,10 +39,10 @@ class M68kAsmParser : public MCTargetAsmParser { #include "M68kGenAsmMatcher.inc" // Helpers for Match&Emit. 
- bool invalidOperand(const SMLoc &Loc, const OperandVector &Operands, + bool invalidOperand(SMLoc Loc, const OperandVector &Operands, const uint64_t &ErrorInfo); - bool missingFeature(const SMLoc &Loc, const uint64_t &ErrorInfo); - bool emit(MCInst &Inst, SMLoc const &Loc, MCStreamer &Out) const; + bool missingFeature(SMLoc Loc, const uint64_t &ErrorInfo); + bool emit(MCInst &Inst, SMLoc Loc, MCStreamer &Out) const; bool parseRegisterName(MCRegister &RegNo, SMLoc Loc, StringRef RegisterName); ParseStatus parseRegister(MCRegister &RegNo); @@ -991,8 +991,7 @@ bool M68kAsmParser::parseInstruction(ParseInstructionInfo &Info, StringRef Name, return false; } -bool M68kAsmParser::invalidOperand(SMLoc const &Loc, - OperandVector const &Operands, +bool M68kAsmParser::invalidOperand(SMLoc Loc, OperandVector const &Operands, uint64_t const &ErrorInfo) { SMLoc ErrorLoc = Loc; char const *Diag = 0; @@ -1015,13 +1014,11 @@ bool M68kAsmParser::invalidOperand(SMLoc const &Loc, return Error(ErrorLoc, Diag); } -bool M68kAsmParser::missingFeature(llvm::SMLoc const &Loc, - uint64_t const &ErrorInfo) { +bool M68kAsmParser::missingFeature(SMLoc Loc, uint64_t const &ErrorInfo) { return Error(Loc, "instruction requires a CPU feature not currently enabled"); } -bool M68kAsmParser::emit(MCInst &Inst, SMLoc const &Loc, - MCStreamer &Out) const { +bool M68kAsmParser::emit(MCInst &Inst, SMLoc Loc, MCStreamer &Out) const { Inst.setLoc(Loc); Out.emitInstruction(Inst, *STI); diff --git a/llvm/lib/Target/Mips/MipsCallingConv.td b/llvm/lib/Target/Mips/MipsCallingConv.td index 3501f9fbfd2e7..748162525b091 100644 --- a/llvm/lib/Target/Mips/MipsCallingConv.td +++ b/llvm/lib/Target/Mips/MipsCallingConv.td @@ -186,7 +186,8 @@ def RetCC_MipsN : CallingConv<[ // // f128 should only occur for the N64 ABI where long double is 128-bit. On // N32, long double is equivalent to double. 
- CCIfType<[i64], CCIfOrigArgWasF128>>, + CCIfSubtargetNot<"isSingleFloat()", + CCIfType<[i64], CCIfOrigArgWasF128>>>, // Aggregate returns are positioned at the lowest address in the slot for // both little and big-endian targets. When passing in registers, this @@ -316,9 +317,10 @@ def CC_Mips_FixedArg : CallingConv<[ // // f128 should only occur for the N64 ABI where long double is 128-bit. On // N32, long double is equivalent to double. - CCIfType<[i64], - CCIfSubtargetNot<"useSoftFloat()", - CCIfOrigArgWasF128>>>, + CCIfType<[i64], + CCIfSubtargetNot<"isSingleFloat()", + CCIfSubtargetNot<"useSoftFloat()", + CCIfOrigArgWasF128>>>>, CCIfCC<"CallingConv::Fast", CCDelegateTo>, @@ -342,8 +344,8 @@ def CC_Mips : CallingConv<[ // Callee-saved register lists. //===----------------------------------------------------------------------===// -def CSR_SingleFloatOnly : CalleeSavedRegs<(add (sequence "F%u", 31, 20), RA, FP, - (sequence "S%u", 7, 0))>; +def CSR_O32_SingleFloat : CalleeSavedRegs<(add(sequence "F%u", 31, 20), RA, FP, + (sequence "S%u", 7, 0))>; def CSR_O32_FPXX : CalleeSavedRegs<(add (sequence "D%u", 15, 10), RA, FP, (sequence "S%u", 7, 0))> { @@ -357,13 +359,19 @@ def CSR_O32_FP64 : CalleeSavedRegs<(add (decimate (sequence "D%u_64", 30, 20), 2), RA, FP, (sequence "S%u", 7, 0))>; -def CSR_N32 : CalleeSavedRegs<(add D20_64, D22_64, D24_64, D26_64, D28_64, - D30_64, RA_64, FP_64, GP_64, - (sequence "S%u_64", 7, 0))>; +def CSR_N32 : CalleeSavedRegs<(add(decimate(sequence "D%u_64", 30, 20), 2), + RA_64, FP_64, GP_64, (sequence "S%u_64", 7, 0))>; + +def CSR_N32_SingleFloat + : CalleeSavedRegs<(add(decimate(sequence "F%u", 30, 20), 2), RA_64, FP_64, + GP_64, (sequence "S%u_64", 7, 0))>; def CSR_N64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 24), RA_64, FP_64, GP_64, (sequence "S%u_64", 7, 0))>; +def CSR_N64_SingleFloat : CalleeSavedRegs<(add(sequence "F%u", 31, 24), RA_64, + FP_64, GP_64, (sequence "S%u_64", 7, 0))>; + def CSR_Mips16RetHelper : 
CalleeSavedRegs<(add V0, V1, FP, (sequence "A%u", 3, 0), (sequence "S%u", 7, 0), diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp index 1491300e37d3e..b05de49d8332a 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -4265,10 +4265,16 @@ parseRegForInlineAsmConstraint(StringRef C, MVT VT) const { return std::make_pair(0U, nullptr); if (Prefix == "$f") { // Parse $f0-$f31. - // If the size of FP registers is 64-bit or Reg is an even number, select - // the 64-bit register class. Otherwise, select the 32-bit register class. - if (VT == MVT::Other) - VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32; + // If the targets is single float only, always select 32-bit registers, + // otherwise if the size of FP registers is 64-bit or Reg is an even number, + // select the 64-bit register class. Otherwise, select the 32-bit register + // class. + if (VT == MVT::Other) { + if (Subtarget.isSingleFloat()) + VT = MVT::f32; + else + VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? 
MVT::f64 : MVT::f32; + } RC = getRegClassFor(VT); @@ -4308,10 +4314,12 @@ MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, return std::make_pair(0U, &Mips::CPU16RegsRegClass); return std::make_pair(0U, &Mips::GPR32RegClass); } - if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) && + if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat()) || + (VT == MVT::f64 && Subtarget.isSingleFloat())) && !Subtarget.isGP64bit()) return std::make_pair(0U, &Mips::GPR32RegClass); - if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat())) && + if ((VT == MVT::i64 || (VT == MVT::f64 && Subtarget.useSoftFloat()) || + (VT == MVT::f64 && Subtarget.isSingleFloat())) && Subtarget.isGP64bit()) return std::make_pair(0U, &Mips::GPR64RegClass); // This will generate an error message diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp index 6f8d6764e77b8..6ca587b1ba4d5 100644 --- a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp +++ b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp @@ -89,14 +89,25 @@ MipsRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { : CSR_Interrupt_32_SaveList; } - if (Subtarget.isSingleFloat()) - return CSR_SingleFloatOnly_SaveList; + // N64 ABI + if (Subtarget.isABI_N64()) { + if (Subtarget.isSingleFloat()) + return CSR_N64_SingleFloat_SaveList; - if (Subtarget.isABI_N64()) return CSR_N64_SaveList; + } + + // N32 ABI + if (Subtarget.isABI_N32()) { + if (Subtarget.isSingleFloat()) + return CSR_N32_SingleFloat_SaveList; - if (Subtarget.isABI_N32()) return CSR_N32_SaveList; + } + + // O32 ABI + if (Subtarget.isSingleFloat()) + return CSR_O32_SingleFloat_SaveList; if (Subtarget.isFP64bit()) return CSR_O32_FP64_SaveList; @@ -111,14 +122,25 @@ const uint32_t * MipsRegisterInfo::getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const { const MipsSubtarget &Subtarget = MF.getSubtarget(); - if (Subtarget.isSingleFloat()) - return 
CSR_SingleFloatOnly_RegMask; + // N64 ABI + if (Subtarget.isABI_N64()) { + if (Subtarget.isSingleFloat()) + return CSR_N64_SingleFloat_RegMask; - if (Subtarget.isABI_N64()) return CSR_N64_RegMask; + } + + // N32 ABI + if (Subtarget.isABI_N32()) { + if (Subtarget.isSingleFloat()) + return CSR_N32_SingleFloat_RegMask; - if (Subtarget.isABI_N32()) return CSR_N32_RegMask; + } + + // O32 ABI + if (Subtarget.isSingleFloat()) + return CSR_O32_SingleFloat_RegMask; if (Subtarget.isFP64bit()) return CSR_O32_FP64_RegMask; diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp index 71a70d9c2dd46..19917f3650bb5 100644 --- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp @@ -28,6 +28,7 @@ #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/CodeGenTypes/MachineValueType.h" @@ -211,6 +212,16 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM, } } + // Targets with 64bits integer registers, but no 64bit floating point register + // do not support conversion between them + if (Subtarget.isGP64bit() && Subtarget.isSingleFloat() && + !Subtarget.useSoftFloat()) { + setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); + setOperationAction(ISD::SINT_TO_FP, MVT::i64, Expand); + setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); + } + setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom); setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom); setOperationAction(ISD::MULHS, MVT::i32, Custom); diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index e86abb7203f2b..bef4868492d4e 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ 
b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -1027,6 +1027,7 @@ pickOpcodeForVT(MVT::SimpleValueType VT, std::optional Opcode_i16, case MVT::f32: return Opcode_i32; case MVT::v2f32: + case MVT::v2i32: case MVT::i64: case MVT::f64: return Opcode_i64; diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 68935784128c0..3ac7c2874408b 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -226,21 +226,20 @@ getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, switch (VectorVT.SimpleTy) { default: return std::nullopt; + case MVT::v4i64: case MVT::v4f64: - case MVT::v8i32: - // This is a "native" vector type iff the address space is global - // and the target supports 256-bit loads/stores + // This is a "native" vector type iff the address space is global and the + // target supports 256-bit loads/stores if (!CanLowerTo256Bit) return std::nullopt; LLVM_FALLTHROUGH; case MVT::v2i8: - case MVT::v2i32: case MVT::v2i64: case MVT::v2f64: - case MVT::v4i32: // This is a "native" vector type return std::pair(NumElts, EltVT); + case MVT::v16f16: // <8 x f16x2> case MVT::v16bf16: // <8 x bf16x2> case MVT::v16i16: // <8 x i16x2> @@ -264,12 +263,18 @@ getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, case MVT::v16i8: // <4 x i8x4> PackRegSize = 32; break; + case MVT::v8f32: // <4 x f32x2> + case MVT::v8i32: // <4 x i32x2> + // This is a "native" vector type iff the address space is global and the + // target supports 256-bit loads/stores if (!CanLowerTo256Bit) return std::nullopt; LLVM_FALLTHROUGH; case MVT::v2f32: // <1 x f32x2> case MVT::v4f32: // <2 x f32x2> + case MVT::v2i32: // <1 x i32x2> + case MVT::v4i32: // <2 x i32x2> if (!STI.hasF32x2Instructions()) return std::pair(NumElts, EltVT); PackRegSize = 64; @@ -590,8 +595,10 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, addRegisterClass(MVT::bf16, 
&NVPTX::B16RegClass); addRegisterClass(MVT::v2bf16, &NVPTX::B32RegClass); - if (STI.hasF32x2Instructions()) + if (STI.hasF32x2Instructions()) { addRegisterClass(MVT::v2f32, &NVPTX::B64RegClass); + addRegisterClass(MVT::v2i32, &NVPTX::B64RegClass); + } // Conversion to/from FP16/FP16x2 is always legal. setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom); @@ -628,12 +635,13 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom); setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom); - // No support for these operations with v2f32. - setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Expand); - setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Expand); + // No support for these operations with v2f32/v2i32 + setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, Expand); + setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2f32, MVT::v2i32}, Expand); // Need custom lowering in case the index is dynamic. if (STI.hasF32x2Instructions()) - setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom); + setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, + Custom); // Custom conversions to/from v2i8. setOperationAction(ISD::BITCAST, MVT::v2i8, Custom); @@ -661,14 +669,13 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, // Operations not directly supported by NVPTX. for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32, MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, - MVT::v4i8, MVT::i32, MVT::i64}) { + MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) { setOperationAction(ISD::SELECT_CC, VT, Expand); setOperationAction(ISD::BR_CC, VT, Expand); } - // Not directly supported. TLI would attempt to expand operations like - // FMINIMUM(v2f32) using invalid SETCC and VSELECT nodes. 
- setOperationAction(ISD::VSELECT, MVT::v2f32, Expand); + // We don't want ops like FMINIMUM or UMAX to be lowered to SETCC+VSELECT. + setOperationAction(ISD::VSELECT, {MVT::v2f32, MVT::v2i32}, Expand); // Some SIGN_EXTEND_INREG can be done using cvt instruction. // For others we will expand to a SHL/SRA pair. @@ -815,7 +822,14 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SRA, ISD::SRL, ISD::MULHS, ISD::MULHU, ISD::FP_TO_SINT, ISD::FP_TO_UINT, ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::SETCC}, - MVT::v2i16, Expand); + {MVT::v2i16, MVT::v2i32}, Expand); + + // v2i32 is not supported for any arithmetic operations + setOperationAction({ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, + ISD::CTPOP, ISD::CTLZ, ISD::ADD, ISD::SUB, ISD::MUL, + ISD::SHL, ISD::SRA, ISD::SRL, ISD::OR, ISD::AND, ISD::XOR, + ISD::SREM, ISD::UREM}, + MVT::v2i32, Expand); setOperationAction(ISD::ADDC, MVT::i32, Legal); setOperationAction(ISD::ADDE, MVT::i32, Legal); @@ -829,7 +843,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, } setOperationAction(ISD::CTTZ, MVT::i16, Expand); - setOperationAction(ISD::CTTZ, MVT::v2i16, Expand); + setOperationAction(ISD::CTTZ, {MVT::v2i16, MVT::v2i32}, Expand); setOperationAction(ISD::CTTZ, MVT::i32, Expand); setOperationAction(ISD::CTTZ, MVT::i64, Expand); @@ -1071,7 +1085,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, // Custom lowering for tcgen05.st vector operands setOperationAction(ISD::INTRINSIC_VOID, {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32, - MVT::v32i32, MVT::v64i32, MVT::v128i32}, + MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other}, Custom); // Enable custom lowering for the following: @@ -2604,7 +2618,7 @@ static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG) { return V; } -static SDValue LowerTcgen05St(SDValue Op, SelectionDAG &DAG) { +static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG) { 
SDNode *N = Op.getNode(); SDLoc DL(N); SmallVector Ops; @@ -2719,7 +2733,52 @@ static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG) { return Tcgen05MMANode; } -static SDValue LowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG) { +// Lower vector return type of tcgen05.ld intrinsics +static std::optional> +lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset = false) { + SDLoc DL(N); + EVT ResVT = N->getValueType(0); + if (!ResVT.isVector()) + return {}; // already legalized. + + const unsigned NumElts = ResVT.getVectorNumElements(); + + // Create the return type of the instructions + SmallVector ListVTs; + for (unsigned i = 0; i < NumElts; ++i) + ListVTs.push_back(MVT::i32); + + ListVTs.push_back(N->getValueType(1)); // Chain + + SDVTList ResVTs = DAG.getVTList(ListVTs); + + SmallVector Ops{N->getOperand(0), N->getOperand(1), + N->getOperand(2)}; + + if (HasOffset) { + Ops.push_back(N->getOperand(3)); // offset + Ops.push_back(N->getOperand(4)); // Pack flag + } else + Ops.push_back(N->getOperand(3)); // Pack flag + + MemIntrinsicSDNode *MemSD = cast(N); + SDValue NewNode = + DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, ResVTs, Ops, + MemSD->getMemoryVT(), MemSD->getMemOperand()); + + // split the vector result + SmallVector ScalarRes; + for (unsigned i = 0; i < NumElts; ++i) { + SDValue Res = NewNode.getValue(i); + ScalarRes.push_back(Res); + } + + SDValue Chain = NewNode.getValue(NumElts); + SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes); + return {{BuildVector, Chain}}; +} + +static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG) { SDNode *N = Op.getNode(); SDValue Intrin = N->getOperand(1); @@ -2765,7 +2824,7 @@ static SDValue LowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG) { case Intrinsic::nvvm_tcgen05_st_16x64b_x64: case Intrinsic::nvvm_tcgen05_st_32x32b_x64: case Intrinsic::nvvm_tcgen05_st_32x32b_x128: - return LowerTcgen05St(Op, DAG); + return lowerTcgen05St(Op, DAG); case 
Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1: case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2: case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1: @@ -2867,6 +2926,28 @@ static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG) { SDValue Selector = (Op->op_end() - 1)->get(); return getPRMT(A, B, Selector, DL, DAG, Mode); } + +static SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG) { + switch (Op->getConstantOperandVal(1)) { + default: + return Op; + + // These tcgen05 intrinsics return a v2i32, which is legal, so we have to + // lower them through LowerOperation() instead of ReplaceNodeResults(). + case Intrinsic::nvvm_tcgen05_ld_16x64b_x2: + case Intrinsic::nvvm_tcgen05_ld_16x128b_x1: + case Intrinsic::nvvm_tcgen05_ld_32x32b_x2: + if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG)) + return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op)); + return SDValue(); + + case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: + if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG, /*HasOffset=*/true)) + return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op)); + return SDValue(); + } +} + static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG) { switch (Op->getConstantOperandVal(0)) { default: @@ -3029,11 +3110,11 @@ NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::ADDRSPACECAST: return LowerADDRSPACECAST(Op, DAG); case ISD::INTRINSIC_W_CHAIN: - return Op; + return lowerIntrinsicWChain(Op, DAG); case ISD::INTRINSIC_WO_CHAIN: return lowerIntrinsicWOChain(Op, DAG); case ISD::INTRINSIC_VOID: - return LowerIntrinsicVoid(Op, DAG); + return lowerIntrinsicVoid(Op, DAG); case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); case ISD::BITCAST: @@ -5920,7 +6001,7 @@ static SDValue PerformEXTRACTCombine(SDNode *N, IsPTXVectorType(VectorVT.getSimpleVT())) return SDValue(); // Native vector loads already combine nicely w/ // extract_vector_elt. 
- // Don't mess with singletons or packed types (v2f32, v2*16, v4i8 and v8i8), + // Don't mess with singletons or packed types (v2*32, v2*16, v4i8 and v8i8), // we already handle them OK. if (VectorVT.getVectorNumElements() == 1 || NVPTX::isPackedVectorTy(VectorVT) || VectorVT == MVT::v8i8) @@ -6300,53 +6381,6 @@ static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i8, {Vec0, Vec1})); } -// Lower vector return type of tcgen05.ld intrinsics -static void ReplaceTcgen05Ld(SDNode *N, SelectionDAG &DAG, - SmallVectorImpl &Results, - bool hasOffset = false) { - SDLoc DL(N); - EVT ResVT = N->getValueType(0); - if (!ResVT.isVector()) - return; // already legalized. - - const unsigned NumElts = ResVT.getVectorNumElements(); - - // Create the return type of the instructions - SmallVector ListVTs; - for (unsigned i = 0; i < NumElts; ++i) - ListVTs.push_back(MVT::i32); - - ListVTs.push_back(N->getValueType(1)); // Chain - - SDVTList ResVTs = DAG.getVTList(ListVTs); - - SmallVector Ops{N->getOperand(0), N->getOperand(1), - N->getOperand(2)}; - - if (hasOffset) { - Ops.push_back(N->getOperand(3)); // offset - Ops.push_back(N->getOperand(4)); // Pack flag - } else - Ops.push_back(N->getOperand(3)); // Pack flag - - MemIntrinsicSDNode *MemSD = cast(N); - SDValue NewNode = - DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, ResVTs, Ops, - MemSD->getMemoryVT(), MemSD->getMemOperand()); - - // split the vector result - SmallVector ScalarRes; - for (unsigned i = 0; i < NumElts; ++i) { - SDValue Res = NewNode.getValue(i); - ScalarRes.push_back(Res); - } - - SDValue Chain = NewNode.getValue(NumElts); - SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes); - Results.push_back(BuildVector); // Build Vector - Results.push_back(Chain); // Chain -} - static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl &Results) { SDValue Chain = N->getOperand(0); @@ -6455,21 +6489,18 @@ static void 
ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, return; } - case Intrinsic::nvvm_tcgen05_ld_16x64b_x2: case Intrinsic::nvvm_tcgen05_ld_16x64b_x4: case Intrinsic::nvvm_tcgen05_ld_16x64b_x8: case Intrinsic::nvvm_tcgen05_ld_16x64b_x16: case Intrinsic::nvvm_tcgen05_ld_16x64b_x32: case Intrinsic::nvvm_tcgen05_ld_16x64b_x64: case Intrinsic::nvvm_tcgen05_ld_16x64b_x128: - case Intrinsic::nvvm_tcgen05_ld_32x32b_x2: case Intrinsic::nvvm_tcgen05_ld_32x32b_x4: case Intrinsic::nvvm_tcgen05_ld_32x32b_x8: case Intrinsic::nvvm_tcgen05_ld_32x32b_x16: case Intrinsic::nvvm_tcgen05_ld_32x32b_x32: case Intrinsic::nvvm_tcgen05_ld_32x32b_x64: case Intrinsic::nvvm_tcgen05_ld_32x32b_x128: - case Intrinsic::nvvm_tcgen05_ld_16x128b_x1: case Intrinsic::nvvm_tcgen05_ld_16x128b_x2: case Intrinsic::nvvm_tcgen05_ld_16x128b_x4: case Intrinsic::nvvm_tcgen05_ld_16x128b_x8: @@ -6482,16 +6513,23 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, case Intrinsic::nvvm_tcgen05_ld_16x256b_x8: case Intrinsic::nvvm_tcgen05_ld_16x256b_x16: case Intrinsic::nvvm_tcgen05_ld_16x256b_x32: - return ReplaceTcgen05Ld(N, DAG, Results); + if (auto Res = lowerTcgen05Ld(N, DAG)) { + Results.push_back(Res->first); + Results.push_back(Res->second); + } + return; - case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64: case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128: - return ReplaceTcgen05Ld(N, DAG, Results, /* Offset */ true); + if (auto Res = lowerTcgen05Ld(N, DAG, /*HasOffset=*/true)) { + Results.push_back(Res->first); + Results.push_back(Res->second); + } + return; } } diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td index 0b2bb694f1fd5..4cacee2290763 100644 --- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td +++ 
b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td @@ -756,8 +756,10 @@ def : Pat<(vt (select i1:$p, vt:$a, vt:$b)), (SELP_b32rr $a, $b, $p)>; } -def : Pat<(v2f32 (select i1:$p, v2f32:$a, v2f32:$b)), +foreach vt = [v2f32, v2i32] in { +def : Pat<(vt (select i1:$p, vt:$a, vt:$b)), (SELP_b64rr $a, $b, $p)>; +} //----------------------------------- // Test Instructions @@ -2101,8 +2103,8 @@ foreach vt = [v2f16, v2bf16, v2i16] in { (V2I16toI32 $a, $b)>; } -// Same thing for the 64-bit type v2f32. -foreach vt = [v2f32] in { +// Handle extracting one element from the pair (64-bit types) +foreach vt = [v2f32, v2i32] in { def : Pat<(extractelt vt:$src, 0), (I64toI32L_Sink $src)>, Requires<[hasPTX<71>]>; def : Pat<(extractelt vt:$src, 1), (I64toI32H_Sink $src)>, Requires<[hasPTX<71>]>; diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td index 2e81ab122d1df..913487b64617a 100644 --- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td +++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td @@ -54,7 +54,8 @@ def B16 : NVPTXRegClass<[i16, f16, bf16], 16, (add (sequence "RS%u", 0, 4))>; def B32 : NVPTXRegClass<[i32, v2f16, v2bf16, v2i16, v4i8, f32], 32, (add (sequence "R%u", 0, 4), VRFrame32, VRFrameLocal32)>; -def B64 : NVPTXRegClass<[i64, v2f32, f64], 64, (add (sequence "RL%u", 0, 4), +def B64 : NVPTXRegClass<[i64, v2i32, v2f32, f64], 64, + (add (sequence "RL%u", 0, 4), VRFrame64, VRFrameLocal64)>; // 128-bit regs are not defined as general regs in NVPTX. They are used for inlineASM only. def B128 : NVPTXRegClass<[i128], 128, (add (sequence "RQ%u", 0, 4))>; diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/llvm/lib/Target/NVPTX/NVPTXUtilities.h index a070789f85e0b..4b5cb30fd3036 100644 --- a/llvm/lib/Target/NVPTX/NVPTXUtilities.h +++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.h @@ -99,8 +99,8 @@ namespace NVPTX { // register. NOTE: This must be kept in sync with the register classes // defined in NVPTXRegisterInfo.td. 
inline auto packed_types() { - static const auto PackedTypes = {MVT::v4i8, MVT::v2f16, MVT::v2bf16, - MVT::v2i16, MVT::v2f32}; + static const auto PackedTypes = {MVT::v4i8, MVT::v2f16, MVT::v2bf16, + MVT::v2i16, MVT::v2f32, MVT::v2i32}; return PackedTypes; } diff --git a/llvm/lib/Target/PowerPC/PPC.td b/llvm/lib/Target/PowerPC/PPC.td index d89a9487c0da2..4ff2f8a54529f 100644 --- a/llvm/lib/Target/PowerPC/PPC.td +++ b/llvm/lib/Target/PowerPC/PPC.td @@ -360,8 +360,11 @@ def FeatureFastMFLR : SubtargetFeature<"fast-MFLR", "HasFastMFLR", "true", //===----------------------------------------------------------------------===// // PowerPC Instruction Predicate Definitions. -def In32BitMode : Predicate<"!Subtarget->isPPC64()">; -def In64BitMode : Predicate<"Subtarget->isPPC64()">; + +def IsLittleEndian : Predicate<"Subtarget->isLittleEndian()">; +def IsBigEndian : Predicate<"!Subtarget->isLittleEndian()">; +def IsPPC32 : Predicate<"!Subtarget->isPPC64()">; +def IsPPC64 : Predicate<"Subtarget->isPPC64()">; def IsBookE : Predicate<"Subtarget->isBookE()">; def IsNotBookE : Predicate<"!Subtarget->isBookE()">; def HasOnlyMSYNC : Predicate<"Subtarget->hasOnlyMSYNC()">; @@ -379,27 +382,50 @@ def NaNsFPMath : Predicate<"!Subtarget->getTargetMachine().Options.NoNaNsFPMath">; def HasBPERMD : Predicate<"Subtarget->hasBPERMD()">; def HasExtDiv : Predicate<"Subtarget->hasExtDiv()">; +def HasFPU : Predicate<"Subtarget->hasFPU()">; +def HasHTM : Predicate<"Subtarget->hasHTM()">; +def HasDirectMove : Predicate<"Subtarget->hasDirectMove()">; +def HasP8Crypto : Predicate<"Subtarget->hasP8Crypto()">; +def PCRelativeMemops : Predicate<"Subtarget->hasPCRelativeMemops()">; +def PrefixInstrs : Predicate<"Subtarget->hasPrefixInstrs()">; +def PairedVectorMemops : Predicate<"Subtarget->pairedVectorMemops()">; +def MMA : Predicate<"Subtarget->hasMMA()">; + +// Vector support predicates +def HasVSX : Predicate<"Subtarget->hasVSX()">; +def NoP8Vector : Predicate<"!Subtarget->hasP8Vector()">; +def 
HasP8Vector : Predicate<"Subtarget->hasP8Vector()">; +def HasAltivec : Predicate<"Subtarget->hasAltivec()">; +def HasP8Altivec : Predicate<"Subtarget->hasP8Altivec()">; +def NoP9Vector : Predicate<"!Subtarget->hasP9Vector()">; +def HasP9Vector : Predicate<"Subtarget->hasP9Vector()">; +def NoP9Altivec : Predicate<"!Subtarget->hasP9Altivec()">; +def HasP9Altivec : Predicate<"Subtarget->hasP9Altivec()">; +def HasOnlySwappingMemOps : Predicate<"!Subtarget->hasP9Vector()">; +def NoP10Vector : Predicate<"!Subtarget->hasP10Vector()">; +def HasP10Vector : Predicate<"Subtarget->hasP10Vector()">; + +// Predicates used to differenciate between different ISAs. def IsISA2_06 : Predicate<"Subtarget->isISA2_06()">; def IsISA2_07 : Predicate<"Subtarget->isISA2_07()">; def IsISA3_0 : Predicate<"Subtarget->isISA3_0()">; -def HasFPU : Predicate<"Subtarget->hasFPU()">; -def PCRelativeMemops : Predicate<"Subtarget->hasPCRelativeMemops()">; +def IsISA3_1 : Predicate<"Subtarget->isISA3_1()">; def IsNotISA3_1 : Predicate<"!Subtarget->isISA3_1()">; +def IsISAFuture : Predicate<"Subtarget->isISAFuture()">; +def IsNotISAFuture : Predicate<"!Subtarget->isISAFuture()">; // AIX assembler may not be modern enough to support some extended mne. 
def ModernAs: Predicate<"!Subtarget->isAIXABI() || Subtarget->HasModernAIXAs">, AssemblerPredicate<(any_of (not AIXOS), FeatureModernAIXAs)>; def IsAIX : Predicate<"Subtarget->isAIXABI()">; def NotAIX : Predicate<"!Subtarget->isAIXABI()">; -def IsISAFuture : Predicate<"Subtarget->isISAFuture()">; -def IsNotISAFuture : Predicate<"!Subtarget->isISAFuture()">; //===----------------------------------------------------------------------===// // HwModes //===----------------------------------------------------------------------===// defvar PPC32 = DefaultMode; -def PPC64 : HwMode<[In64BitMode]>; +def PPC64 : HwMode<[IsPPC64]>; // Since new processors generally contain a superset of features of those that // came before them, the idea is to make implementations of new processors diff --git a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td index 269d30318bca8..60efa4c8f0a37 100644 --- a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td +++ b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td @@ -76,23 +76,23 @@ let Interpretation64Bit = 1, isCodeGenOnly = 1 in { let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, hasSideEffects = 0 in { let isReturn = 1, isPredicable = 1, Uses = [LR8, RM] in def BLR8 : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", IIC_BrB, - [(PPCretglue)]>, Requires<[In64BitMode]>; + [(PPCretglue)]>, Requires<[IsPPC64]>; let isBranch = 1, isIndirectBranch = 1, Uses = [CTR8] in { let isPredicable = 1 in def BCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def BCCCTR8 : XLForm_2_br<19, 528, 0, (outs), (ins (pred $BIBO, $CR):$cond), "b${cond:cc}ctr${cond:pm} ${cond:reg}", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def BCCTR8 : XLForm_2_br2<19, 528, 12, 0, (outs), (ins crbitrc:$BI), "bcctr 12, $BI, 0", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def BCCTR8n : XLForm_2_br2<19, 528, 4, 0, (outs), (ins crbitrc:$BI), 
"bcctr 4, $BI, 0", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } } @@ -160,20 +160,20 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8], hasSideEffects = 0 in { let isPredicable = 1 in def BCTRL8 : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), "bctrl", IIC_BrB, [(PPCbctrl)]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; let isCodeGenOnly = 1 in { def BCCCTRL8 : XLForm_2_br<19, 528, 1, (outs), (ins (pred $BIBO, $CR):$cond), "b${cond:cc}ctrl${cond:pm} ${cond:reg}", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def BCCTRL8 : XLForm_2_br2<19, 528, 12, 1, (outs), (ins crbitrc:$BI), "bcctrl 12, $BI, 0", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def BCCTRL8n : XLForm_2_br2<19, 528, 4, 1, (outs), (ins crbitrc:$BI), "bcctrl 4, $BI, 0", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } } } @@ -207,7 +207,7 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR8, RM], hasSideEffects = 0, let isPredicable = 1 in def BCTRL8_RM : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), "bctrl", IIC_BrB, [(PPCbctrl_rm)]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } } @@ -218,7 +218,7 @@ let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1, (ins (memrix $D, $RA):$src), "bctrl\n\tld 2, $src", IIC_BrB, [(PPCbctrl_load_toc iaddrX4:$src)]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1, @@ -228,7 +228,7 @@ let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1, (ins (memrix $D, $RA):$src), "bctrl\n\tld 2, $src", IIC_BrB, [(PPCbctrl_load_toc_rm iaddrX4:$src)]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } } // Interpretation64Bit @@ -449,7 +449,7 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1, isIndirectBranch = 1, isCall = 1, isReturn = 1, Uses = [CTR8, RM] in def TAILBCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; let isBranch = 1, isTerminator = 1, 
hasCtrlDep = 1, PPC970_Unit = 7, isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in @@ -516,7 +516,7 @@ let hasSideEffects = 1 in { def EH_SjLj_SetJmp64 : PPCCustomInserterPseudo<(outs gprc:$dst), (ins memr:$buf), "#EH_SJLJ_SETJMP64", [(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } let hasSideEffects = 1, isBarrier = 1 in { @@ -524,7 +524,7 @@ let hasSideEffects = 1, isBarrier = 1 in { def EH_SjLj_LongJmp64 : PPCCustomInserterPseudo<(outs), (ins memr:$buf), "#EH_SJLJ_LONGJMP64", [(PPCeh_sjlj_longjmp addr:$buf)]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; } def MFSPR8 : XFXForm_1<31, 339, (outs g8rc:$RST), (ins i32imm:$SPR), @@ -1948,7 +1948,7 @@ def : Pat<(atomic_load_nonext_64 XForm:$src), (LDX memrr:$src)>; def : Pat<(atomic_store_64 i64:$val, DSForm:$ptr), (STD g8rc:$val, memrix:$ptr)>; def : Pat<(atomic_store_64 i64:$val, XForm:$ptr), (STDX g8rc:$val, memrr:$ptr)>; -let Predicates = [IsISA3_0, In64BitMode] in { +let Predicates = [IsISA3_0, IsPPC64] in { def : Pat<(i64 (int_ppc_cmpeqb g8rc:$a, g8rc:$b)), (i64 (SETB8 (CMPEQB $a, $b)))>; def : Pat<(i64 (int_ppc_setb g8rc:$a, g8rc:$b)), @@ -1961,7 +1961,7 @@ def : Pat<(i64 (int_ppc_maddld g8rc:$a, g8rc:$b, g8rc:$c)), (i64 (MADDLD8 $a, $b, $c))>; } -let Predicates = [In64BitMode] in { +let Predicates = [IsPPC64] in { def : Pat<(i64 (int_ppc_mulhd g8rc:$a, g8rc:$b)), (i64 (MULHD $a, $b))>; def : Pat<(i64 (int_ppc_mulhdu g8rc:$a, g8rc:$b)), diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td index 97d5e28963234..c616db4a1031c 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td +++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td @@ -343,7 +343,6 @@ class VXCR_Int_Ty xo, string opc, Intrinsic IntID, ValueType Ty> //===----------------------------------------------------------------------===// // Instruction Definitions. 
-def HasAltivec : Predicate<"Subtarget->hasAltivec()">; let Predicates = [HasAltivec] in { def DSS : DSS_Form<0, 822, (outs), (ins u5imm:$STRM), @@ -1193,8 +1192,6 @@ class VX_VT5_VA5_VB5_XO9_o xo, string opc, list pattern> let PS = 0; } -def HasP8Altivec : Predicate<"Subtarget->hasP8Altivec()">; -def HasP8Crypto : Predicate<"Subtarget->hasP8Crypto()">; let Predicates = [HasP8Altivec] in { let isCommutable = 1 in { @@ -1420,7 +1417,6 @@ def VSBOX : VXBX_Int_Ty<1480, "vsbox", int_ppc_altivec_crypto_vsbox, v2i64>; } // HasP8Crypto // The following altivec instructions were introduced in Power ISA 3.0 -def HasP9Altivec : Predicate<"Subtarget->hasP9Altivec()">; let Predicates = [HasP9Altivec] in { // Vector Multiply-Sum diff --git a/llvm/lib/Target/PowerPC/PPCInstrHTM.td b/llvm/lib/Target/PowerPC/PPCInstrHTM.td index 8d0ac512b290d..6b5da44c91c2b 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrHTM.td +++ b/llvm/lib/Target/PowerPC/PPCInstrHTM.td @@ -11,10 +11,6 @@ // //===----------------------------------------------------------------------===// - - -def HasHTM : Predicate<"Subtarget->hasHTM()">; - def HTM_get_imm : SDNodeXFormgetZExtValue(), SDLoc(N)); }]>; diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td index 1c45050cdf9ca..aca7abd5a45a7 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td @@ -1282,7 +1282,7 @@ def RESTORE_CRBIT : PPCEmitTimePseudo<(outs crbitrc:$cond), (ins memri:$F), let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, hasSideEffects = 0 in { let isPredicable = 1, isReturn = 1, Uses = [LR, RM] in def BLR : XLForm_2_ext<19, 16, 20, 0, 0, (outs), (ins), "blr", IIC_BrB, - [(PPCretglue)]>, Requires<[In32BitMode]>; + [(PPCretglue)]>, Requires<[IsPPC32]>; let isBranch = 1, isIndirectBranch = 1, Uses = [CTR] in { let isPredicable = 1 in def BCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB, @@ -1455,7 +1455,7 @@ let isCall = 1, PPC970_Unit = 7, 
Defs = [LR] in { let isPredicable = 1 in def BCTRL : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), "bctrl", IIC_BrB, [(PPCbctrl)]>, - Requires<[In32BitMode]>; + Requires<[IsPPC32]>; let isCodeGenOnly = 1 in { def BCCCTRL : XLForm_2_br<19, 528, 1, (outs), (ins (pred $BIBO, $CR):$cond), @@ -1541,7 +1541,7 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR, RM], isCodeGenOnly = 1 in { let isPredicable = 1 in def BCTRL_RM : XLForm_2_ext<19, 528, 20, 0, 1, (outs), (ins), "bctrl", IIC_BrB, [(PPCbctrl_rm)]>, - Requires<[In32BitMode]>; + Requires<[IsPPC32]>; } } @@ -1567,7 +1567,7 @@ let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1, def BCTRL_LWZinto_toc: XLForm_2_ext_and_DForm_1<19, 528, 20, 0, 1, 32, (outs), (ins (memri $D, $RA):$addr), "bctrl\n\tlwz 2, $addr", IIC_BrB, - [(PPCbctrl_load_toc iaddr:$addr)]>, Requires<[In32BitMode]>; + [(PPCbctrl_load_toc iaddr:$addr)]>, Requires<[IsPPC32]>; } @@ -1576,7 +1576,7 @@ let isCall = 1, PPC970_Unit = 7, isCodeGenOnly = 1, def BCTRL_LWZinto_toc_RM: XLForm_2_ext_and_DForm_1<19, 528, 20, 0, 1, 32, (outs), (ins (memri $D, $RA):$addr), "bctrl\n\tlwz 2, $addr", IIC_BrB, - [(PPCbctrl_load_toc_rm iaddr:$addr)]>, Requires<[In32BitMode]>; + [(PPCbctrl_load_toc_rm iaddr:$addr)]>, Requires<[IsPPC32]>; } @@ -1585,7 +1585,7 @@ let isCodeGenOnly = 1, hasSideEffects = 0 in { let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1, isIndirectBranch = 1, isCall = 1, isReturn = 1, Uses = [CTR, RM] in def TAILBCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", IIC_BrB, - []>, Requires<[In32BitMode]>; + []>, Requires<[IsPPC32]>; let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7, isBarrier = 1, isCall = 1, isReturn = 1, Uses = [RM] in @@ -1608,7 +1608,7 @@ let hasSideEffects = 1 in { def EH_SjLj_SetJmp32 : PPCCustomInserterPseudo<(outs gprc:$dst), (ins memr:$buf), "#EH_SJLJ_SETJMP32", [(set i32:$dst, (PPCeh_sjlj_setjmp addr:$buf))]>, - Requires<[In32BitMode]>; + Requires<[IsPPC32]>; } let hasSideEffects = 1, 
isBarrier = 1 in { @@ -1616,7 +1616,7 @@ let hasSideEffects = 1, isBarrier = 1 in { def EH_SjLj_LongJmp32 : PPCCustomInserterPseudo<(outs), (ins memr:$buf), "#EH_SJLJ_LONGJMP32", [(PPCeh_sjlj_longjmp addr:$buf)]>, - Requires<[In32BitMode]>; + Requires<[IsPPC32]>; } // This pseudo is never removed from the function, as it serves as @@ -3438,8 +3438,6 @@ def Msk2Imm : ImmLeaf(Imm); }]>; def Msk4Imm : ImmLeaf(Imm); }]>; def Msk8Imm : ImmLeaf(Imm); }]>; -def MMA : Predicate<"Subtarget->hasMMA()">; - // Prefixed instructions may require access to the above defs at a later // time so we include this after the def. include "PPCInstrP10.td" @@ -5144,9 +5142,9 @@ def RotateInsertByte1 { } // Clear the upper half of the register when in 64-bit mode -let Predicates = [In64BitMode] in +let Predicates = [IsPPC64] in def : Pat<(i32 (bitreverse i32:$A)), (RLDICL_32 RotateInsertByte1.Left, 0, 32)>; -let Predicates = [In32BitMode] in +let Predicates = [IsPPC32] in def : Pat<(i32 (bitreverse i32:$A)), RotateInsertByte1.Left>; // Fast 64-bit reverse bits algorithm: diff --git a/llvm/lib/Target/PowerPC/PPCInstrP10.td b/llvm/lib/Target/PowerPC/PPCInstrP10.td index ba70398daf2c0..2384959a60a43 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrP10.td +++ b/llvm/lib/Target/PowerPC/PPCInstrP10.td @@ -51,10 +51,6 @@ // Moreover, the order of operands reflects the order of operands // in the encoding. -//-------------------------- Predicate definitions ---------------------------// -def IsPPC32 : Predicate<"!Subtarget->isPPC64()">; - - //===----------------------------------------------------------------------===// // PowerPC ISA 3.1 specific type constraints. 
// @@ -634,9 +630,6 @@ multiclass 8LS_DForm_R_SI34_XT6_RA5_MEM_p opcode, dag OOL, dag IOL, } } -def PrefixInstrs : Predicate<"Subtarget->hasPrefixInstrs()">; -def IsISA3_1 : Predicate<"Subtarget->isISA3_1()">; -def PairedVectorMemops : Predicate<"Subtarget->pairedVectorMemops()">; def RCCp { dag AToVSRC = (COPY_TO_REGCLASS $XA, VSRC); dag BToVSRC = (COPY_TO_REGCLASS $XB, VSRC); diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td index 4e5165bfcda55..979ba31b0431b 100644 --- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td +++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td @@ -116,20 +116,6 @@ def PPCSToV : SDNode<"PPCISD::SCALAR_TO_VECTOR_PERMUTED", SDTypeProfile<1, 1, []>, []>; def PPCxxperm : SDNode<"PPCISD::XXPERM", SDT_PPCxxperm, []>; -//-------------------------- Predicate definitions ---------------------------// -def HasVSX : Predicate<"Subtarget->hasVSX()">; -def IsLittleEndian : Predicate<"Subtarget->isLittleEndian()">; -def IsBigEndian : Predicate<"!Subtarget->isLittleEndian()">; -def IsPPC64 : Predicate<"Subtarget->isPPC64()">; -def HasOnlySwappingMemOps : Predicate<"!Subtarget->hasP9Vector()">; -def NoP8Vector : Predicate<"!Subtarget->hasP8Vector()">; -def HasP8Vector : Predicate<"Subtarget->hasP8Vector()">; -def HasDirectMove : Predicate<"Subtarget->hasDirectMove()">; -def NoP9Vector : Predicate<"!Subtarget->hasP9Vector()">; -def HasP9Vector : Predicate<"Subtarget->hasP9Vector()">; -def NoP9Altivec : Predicate<"!Subtarget->hasP9Altivec()">; -def NoP10Vector: Predicate<"!Subtarget->hasP10Vector()">; -def HasP10Vector: Predicate<"Subtarget->hasP10Vector()">; def PPCldsplatAlign16 : PatFrag<(ops node:$ptr), (PPCldsplat node:$ptr), [{ return cast(N)->getAlign() >= Align(16) && @@ -1293,13 +1279,13 @@ let Predicates = [HasVSX, HasP8Vector] in { def MFVSRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$RA), (ins vsfrc:$XT), "mfvsrd $RA, $XT", IIC_VecGeneral, [(set i64:$RA, (PPCmfvsr f64:$XT))]>, - Requires<[In64BitMode]>; + 
Requires<[IsPPC64]>; // FIXME: Setting the hasSideEffects flag here to match current behaviour. let isCodeGenOnly = 1, hasSideEffects = 1 in def MFVRD : XX1_RS6_RD5_XO<31, 51, (outs g8rc:$RA), (ins vsrc:$XT), "mfvsrd $RA, $XT", IIC_VecGeneral, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def MFVSRWZ : XX1_RS6_RD5_XO<31, 115, (outs gprc:$RA), (ins vsfrc:$XT), "mfvsrwz $RA, $XT", IIC_VecGeneral, [(set i32:$RA, (PPCmfvsr f64:$XT))]>, ZExt32To64; @@ -1311,13 +1297,13 @@ let Predicates = [HasVSX, HasP8Vector] in { def MTVSRD : XX1_RS6_RD5_XO<31, 179, (outs vsfrc:$XT), (ins g8rc:$RA), "mtvsrd $XT, $RA", IIC_VecGeneral, [(set f64:$XT, (PPCmtvsra i64:$RA))]>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; // FIXME: Setting the hasSideEffects flag here to match current behaviour. let isCodeGenOnly = 1, hasSideEffects = 1 in def MTVRD : XX1_RS6_RD5_XO<31, 179, (outs vsrc:$XT), (ins g8rc:$RA), "mtvsrd $XT, $RA", IIC_VecGeneral, []>, - Requires<[In64BitMode]>; + Requires<[IsPPC64]>; def MTVSRWA : XX1_RS6_RD5_XO<31, 211, (outs vsfrc:$XT), (ins gprc:$RA), "mtvsrwa $XT, $RA", IIC_VecGeneral, [(set f64:$XT, (PPCmtvsra i32:$RA))]>; @@ -1344,11 +1330,11 @@ def MTVSRWS: XX1_RS6_RD5_XO<31, 403, (outs vsrc:$XT), (ins gprc:$RA), def MTVSRDD: XX1Form<31, 435, (outs vsrc:$XT), (ins g8rc_nox0:$RA, g8rc:$RB), "mtvsrdd $XT, $RA, $RB", IIC_VecGeneral, - []>, Requires<[In64BitMode]>; + []>, Requires<[IsPPC64]>; def MFVSRLD: XX1_RS6_RD5_XO<31, 307, (outs g8rc:$RA), (ins vsrc:$XT), "mfvsrld $RA, $XT", IIC_VecGeneral, - []>, Requires<[In64BitMode]>; + []>, Requires<[IsPPC64]>; } // HasVSX, IsISA3_0, HasDirectMove diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td index 2e14ae3af957e..3f2e7dbd07a67 100644 --- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td +++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td @@ -169,6 +169,64 @@ class SiFive7GetOrderedReductionCycles { int c = !mul(6, VLUpperBound); } +class isSingleDLEN { + bit c = 
!or(!eq(mx, "MF2"), !or(!eq(mx, "MF4"), !eq(mx, "MF8"))); +} + +class SiFive7GetCyclesVRGatherVV { + // if (hasFastGather && isSingleDLEN(mx)) + // c = 1; + // else if (hasFastGather && (log2(SEW/8) + log2(LMUL) <= log2(DLEN / 32)) + // c = LMUL * 2 * ceil(vl * SEW / DLEN); + // else + // c = vl; + + defvar y = !logtwo(!div(sew, 8)); + defvar x = !cond( + !eq(mx, "M1") : y, + !eq(mx, "M2") : !add(y, 1), + !eq(mx, "M4") : !add(y, 2), + !eq(mx, "M8") : !add(y, 3), + // Give isSingleDLEN(mx) cases a garbage value to avoid build failures, + // even though x will go unused. + true : 1 + ); + // LMUL * 2 * ceil(vl * SEW / DLEN) = LMUL * 2 * ceil(2 * LMUL) + defvar z = !cond( + !eq(mx, "M1") : 4, + !eq(mx, "M2") : 16, + !eq(mx, "M4") : 64, + !eq(mx, "M8") : 256, + // Give isSingleDLEN(mx) cases a garbage value to avoid build failures, + // even though z will go unused. + true : 1 + ); + defvar VLUpperBound = SiFive7GetCyclesOnePerElement.c; + bit IsSingleDLEN = isSingleDLEN.c; + + int c = !cond( + !and(hasFastGather, IsSingleDLEN) : 1, + !and(hasFastGather, !le(x, !logtwo(!div(VLEN, 64)))) : z, + true: VLUpperBound + ); +} + +class SiFive7GetCyclesVCompress { + + // if (hasFastGather && isSingleDLEN(mx)) + // c = 1 + // else + // c = vl + defvar VLUpperBound = SiFive7GetCyclesOnePerElement.c; + bit IsSingleDLEN = isSingleDLEN.c; + + int c = !if(!and(hasFastGather, IsSingleDLEN), + 1, + VLUpperBound); +} + class SiFive7GetSiFiveVFNRClipCycles { int latency = !cond( !eq(mx, "MF8"): 7, @@ -259,7 +317,8 @@ multiclass SiFive7WriteResBase { + bit isFP64Throttled = false, + bit hasFastGather = false> { // Branching let Latency = 3 in { @@ -976,13 +1035,33 @@ multiclass SiFive7WriteResBase.val in { - defvar Cycles = SiFive7GetCyclesOnePerElement.c; defvar IsWorstCase = SiFive7IsWorstCaseMXSEW.c; - let Latency = !add(Cycles, 3), AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in { - defm : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [VCQ, VA1], mx, sew, IsWorstCase>; 
- defm : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [VCQ, VA1], mx, sew, IsWorstCase>; - defm : LMULSEWWriteResMXSEW<"WriteVCompressV", [VCQ, VA1], mx, sew, IsWorstCase>; - } + defvar IsSingleDLEN = isSingleDLEN.c; + + defvar GatherVVCycles = + SiFive7GetCyclesVRGatherVV.c; + // 7 + DLEN/ SEW + defvar SlowGatherLat = !add(7, !div(!div(VLEN, 2), sew)); + defvar GatherVVLat = !if(hasFastGather, + !add(3, GatherVVCycles), SlowGatherLat); + + let Latency = GatherVVLat, AcquireAtCycles = [0, 1], + ReleaseAtCycles = [1, !add(5, GatherVVCycles)] in + defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherVV", [VCQ, VA1], mx, sew, IsWorstCase>; + + // VRGatherEI16VV is not improved by fastGather. + defvar GatherEI16VVCycles = SiFive7GetCyclesOnePerElement.c; + let Latency = SlowGatherLat, AcquireAtCycles = [0, 1], + ReleaseAtCycles = [1, !add(5, GatherEI16VVCycles)] in + defm "" : LMULSEWWriteResMXSEW<"WriteVRGatherEI16VV", [VCQ, VA1], mx, sew, IsWorstCase>; + + defvar CompressCycles = SiFive7GetCyclesVCompress.c; + defvar CompressLat = !if(!and(hasFastGather, IsSingleDLEN), + 4, + !add(7, CompressCycles)); // 7 + VL + let Latency = CompressLat, AcquireAtCycles = [0, 1], + ReleaseAtCycles = [1, !add(8, CompressCycles)] in + defm "" : LMULSEWWriteResMXSEW<"WriteVCompressV", [VCQ, VA1], mx, sew, IsWorstCase>; } } @@ -1408,7 +1487,8 @@ multiclass SiFive7ReadAdvance { /// eventually be supplied by different SchedMachineModels. multiclass SiFive7SchedResources { + bit isFP64Throttled, + bit hasFastGather> { defm SiFive7 : SiFive7ProcResources; // Pull out defs from SiFive7ProcResources so we can refer to them by name. 
@@ -1435,7 +1515,8 @@ multiclass SiFive7SchedResources; + SiFive7VCQ, fpLatencies, isFP64Throttled, + hasFastGather>; //===----------------------------------------------------------------------===// // Bypass and advance @@ -1468,6 +1549,7 @@ class SiFive7SchedMachineModel : SchedMachineModel { SiFive7FPLatencies FPLatencies; bit IsFP64Throttled = false; + bit HasFastGather = false; string Name = !subst("Model", "", !subst("SiFive7", "", NAME)); } @@ -1494,6 +1576,7 @@ def SiFive7VLEN1024X300Model : SiFive7SchedMachineModel<1024> { let HasExtraVALU = true; let FPLatencies = SiFive7LowFPLatencies; let IsFP64Throttled = true; + let HasFastGather = true; } /// Binding models to their scheduling resources. @@ -1501,7 +1584,8 @@ foreach model = [SiFive7VLEN512Model, SiFive7VLEN1024X300Model] in { let SchedModel = model in defm model.Name : SiFive7SchedResources; + model.IsFP64Throttled, + model.HasFastGather>; } // Some model name aliases. diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index 72a35ee2bc309..563f3bbee81df 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -2774,6 +2774,44 @@ bool RISCVTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst, Alignment, Mask, EVL, Stride); return true; } + case Intrinsic::riscv_vloxei_mask: + case Intrinsic::riscv_vluxei_mask: + case Intrinsic::riscv_vsoxei_mask: + case Intrinsic::riscv_vsuxei_mask: + HasMask = true; + [[fallthrough]]; + case Intrinsic::riscv_vloxei: + case Intrinsic::riscv_vluxei: + case Intrinsic::riscv_vsoxei: + case Intrinsic::riscv_vsuxei: { + // Intrinsic interface (only listed ordered version): + // riscv_vloxei(merge, ptr, index, vl) + // riscv_vloxei_mask(merge, ptr, index, mask, vl, policy) + // riscv_vsoxei(val, ptr, index, vl) + // riscv_vsoxei_mask(val, ptr, index, mask, vl, policy) + bool IsWrite = Inst->getType()->isVoidTy(); + Type *Ty = IsWrite ? 
Inst->getArgOperand(0)->getType() : Inst->getType(); + const auto *RVVIInfo = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IID); + unsigned VLIndex = RVVIInfo->VLOperand; + unsigned PtrOperandNo = VLIndex - 2 - HasMask; + Value *Mask; + if (HasMask) { + Mask = Inst->getArgOperand(VLIndex - 1); + } else { + // Mask cannot be nullptr here: vector GEP produces , + // and casting that to scalar i64 triggers a vector/scalar mismatch + // assertion in CreatePointerCast. Use an all-true mask so ASan lowers it + // via extractelement instead. + Type *MaskType = Ty->getWithNewType(Type::getInt1Ty(C)); + Mask = ConstantInt::getTrue(MaskType); + } + Value *EVL = Inst->getArgOperand(VLIndex); + Value *OffsetOp = Inst->getArgOperand(PtrOperandNo + 1); + Info.InterestingOperands.emplace_back(Inst, PtrOperandNo, IsWrite, Ty, + Align(1), Mask, EVL, + /* Stride */ nullptr, OffsetOp); + return true; + } } return false; } diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp index 86f445954400e..f704d3afdea78 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp @@ -1096,6 +1096,41 @@ static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, return true; } +static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode, + unsigned Scope, MachineIRBuilder &MIRBuilder, + SPIRVGlobalRegistry *GR) { + switch (Opcode) { + case SPIRV::OpCommitReadPipe: + case SPIRV::OpCommitWritePipe: + return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0)); + case SPIRV::OpGroupCommitReadPipe: + case SPIRV::OpGroupCommitWritePipe: + case SPIRV::OpGroupReserveReadPipePackets: + case SPIRV::OpGroupReserveWritePipePackets: { + Register ScopeConstReg = + MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0); + MachineRegisterInfo *MRI = MIRBuilder.getMRI(); + MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass); + MachineInstrBuilder MIB; + MIB = MIRBuilder.buildInstr(Opcode); + // Add 
Return register and type. + if (Opcode == SPIRV::OpGroupReserveReadPipePackets || + Opcode == SPIRV::OpGroupReserveWritePipePackets) + MIB.addDef(Call->ReturnRegister) + .addUse(GR->getSPIRVTypeID(Call->ReturnType)); + + MIB.addUse(ScopeConstReg); + for (unsigned int i = 0; i < Call->Arguments.size(); ++i) + MIB.addUse(Call->Arguments[i]); + + return true; + } + default: + return buildOpFromWrapper(MIRBuilder, Opcode, Call, + GR->getSPIRVTypeID(Call->ReturnType)); + } +} + static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) { switch (dim) { case SPIRV::Dim::DIM_1D: @@ -2350,6 +2385,20 @@ static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR); } +static bool generatePipeInst(const SPIRV::IncomingCall *Call, + MachineIRBuilder &MIRBuilder, + SPIRVGlobalRegistry *GR) { + const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; + unsigned Opcode = + SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode; + + unsigned Scope = SPIRV::Scope::Workgroup; + if (Builtin->Name.contains("sub_group")) + Scope = SPIRV::Scope::Subgroup; + + return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR); +} + static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) { @@ -2948,6 +2997,8 @@ std::optional lowerBuiltin(const StringRef DemangledCall, return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR); case SPIRV::Block2DLoadStore: return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR); + case SPIRV::Pipe: + return generatePipeInst(Call.get(), MIRBuilder, GR); } return false; } diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td index d08560bb6565a..2a8deb6bf498b 100644 --- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td +++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td @@ -69,6 +69,7 @@ def ExtendedBitOps : BuiltinGroup; def BindlessINTEL : BuiltinGroup; def TernaryBitwiseINTEL : 
BuiltinGroup; def Block2DLoadStore : BuiltinGroup; +def Pipe : BuiltinGroup; //===----------------------------------------------------------------------===// // Class defining a demangled builtin record. The information in the record @@ -633,6 +634,29 @@ defm : DemangledNativeBuiltin<"__spirv_AtomicSMax", OpenCL_std, Atomic, 4, 4, Op defm : DemangledNativeBuiltin<"__spirv_AtomicUMin", OpenCL_std, Atomic, 4, 4, OpAtomicUMin>; defm : DemangledNativeBuiltin<"__spirv_AtomicUMax", OpenCL_std, Atomic, 4, 4, OpAtomicUMax>; +// Pipe Instruction +defm : DemangledNativeBuiltin<"__read_pipe_2", OpenCL_std, Pipe,2, 2, OpReadPipe>; +defm : DemangledNativeBuiltin<"__write_pipe_2", OpenCL_std, Pipe, 2, 2, OpWritePipe>; +defm : DemangledNativeBuiltin<"__read_pipe_4", OpenCL_std, Pipe,4, 4, OpReservedReadPipe>; +defm : DemangledNativeBuiltin<"__write_pipe_4", OpenCL_std, Pipe, 4, 4, OpReservedWritePipe>; +defm : DemangledNativeBuiltin<"__reserve_read_pipe", OpenCL_std, Pipe, 2, 2, OpReserveReadPipePackets>; +defm : DemangledNativeBuiltin<"__reserve_write_pipe", OpenCL_std, Pipe, 2, 2, OpReserveWritePipePackets>; +defm : DemangledNativeBuiltin<"__commit_read_pipe", OpenCL_std, Pipe, 2, 2, OpCommitReadPipe>; +defm : DemangledNativeBuiltin<"__commit_write_pipe", OpenCL_std, Pipe, 2, 2, OpCommitWritePipe>; +defm : DemangledNativeBuiltin<"is_valid_reserve_id", OpenCL_std, Pipe, 1, 1, OpIsValidReserveId>; +defm : DemangledNativeBuiltin<"__get_pipe_num_packets_ro", OpenCL_std, Pipe, 1, 1, OpGetNumPipePackets>; +defm : DemangledNativeBuiltin<"__get_pipe_max_packets_ro", OpenCL_std, Pipe, 1, 1, OpGetMaxPipePackets>; +defm : DemangledNativeBuiltin<"__get_pipe_num_packets_wo", OpenCL_std, Pipe, 1, 1, OpGetNumPipePackets>; +defm : DemangledNativeBuiltin<"__get_pipe_max_packets_wo", OpenCL_std, Pipe, 1, 1, OpGetMaxPipePackets>; +defm : DemangledNativeBuiltin<"__work_group_reserve_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveReadPipePackets>; +defm : 
DemangledNativeBuiltin<"__work_group_reserve_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveWritePipePackets>; +defm : DemangledNativeBuiltin<"__work_group_commit_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitReadPipe>; +defm : DemangledNativeBuiltin<"__work_group_commit_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitWritePipe>; +defm : DemangledNativeBuiltin<"__sub_group_reserve_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveReadPipePackets>; +defm : DemangledNativeBuiltin<"__sub_group_reserve_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupReserveWritePipePackets>; +defm : DemangledNativeBuiltin<"__sub_group_commit_read_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitReadPipe>; +defm : DemangledNativeBuiltin<"__sub_group_commit_write_pipe", OpenCL_std, Pipe, 2, 2, OpGroupCommitWritePipe>; + // Barrier builtin records: defm : DemangledNativeBuiltin<"barrier", OpenCL_std, Barrier, 1, 3, OpControlBarrier>; defm : DemangledNativeBuiltin<"work_group_barrier", OpenCL_std, Barrier, 1, 3, OpControlBarrier>; diff --git a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp index 993de9e9f64ec..85ea9e156cb97 100644 --- a/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVCommandLine.cpp @@ -148,7 +148,10 @@ static const std::map> SPIRV::Extension::Extension::SPV_KHR_float_controls2}, {"SPV_INTEL_tensor_float32_conversion", SPIRV::Extension::Extension::SPV_INTEL_tensor_float32_conversion}, - {"SPV_KHR_bfloat16", SPIRV::Extension::Extension::SPV_KHR_bfloat16}}; + {"SPV_KHR_bfloat16", SPIRV::Extension::Extension::SPV_KHR_bfloat16}, + {"SPV_EXT_relaxed_printf_string_address_space", + SPIRV::Extension::Extension:: + SPV_EXT_relaxed_printf_string_address_space}}; bool SPIRVExtensionsParser::parse(cl::Option &O, StringRef ArgName, StringRef ArgValue, diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp index f5a49e2b47363..704edd3139260 100644 --- 
a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp @@ -1909,11 +1909,12 @@ Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) { B.SetInsertPoint(&I); SmallVector Types = {I.getInsertedValueOperand()->getType()}; SmallVector Args; - for (auto &Op : I.operands()) - if (isa(Op)) - Args.push_back(UndefValue::get(B.getInt32Ty())); - else - Args.push_back(Op); + Value *AggregateOp = I.getAggregateOperand(); + if (isa(AggregateOp)) + Args.push_back(UndefValue::get(B.getInt32Ty())); + else + Args.push_back(AggregateOp); + Args.push_back(I.getInsertedValueOperand()); for (auto &Op : I.indices()) Args.push_back(B.getInt32(Op)); Instruction *NewI = diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td index 496dcba17c10d..1723bfb639189 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td +++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td @@ -763,7 +763,38 @@ def OpGetDefaultQueue: Op<303, (outs ID:$res), (ins TYPE:$type), def OpBuildNDRange: Op<304, (outs ID:$res), (ins TYPE:$type, ID:$GWS, ID:$LWS, ID:$GWO), "$res = OpBuildNDRange $type $GWS $LWS $GWO">; -// TODO: 3.42.23. Pipe Instructions +// 3.42.23. 
Pipe Instructions + +def OpReadPipe: Op<274, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign), + "$res = OpReadPipe $type $Pipe $Pointer $PcktSize $PcktAlign">; +def OpWritePipe: Op<275, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign), + "$res = OpWritePipe $type $Pipe $Pointer $PcktSize $PcktAlign">; +def OpReservedReadPipe : Op<276, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$ReserveId, ID:$Index, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign), + "$res = OpReservedReadPipe $type $Pipe $ReserveId $Index $Pointer $PcktSize $PcktAlign">; +def OpReservedWritePipe : Op<277, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$ReserveId, ID:$Index, ID:$Pointer, ID:$PcktSize, ID:$PcktAlign), + "$res = OpReservedWritePipe $type $Pipe $ReserveId $Index $Pointer $PcktSize $PcktAlign">; +def OpReserveReadPipePackets : Op<278, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$NumPckts, ID:$PcktSize, ID:$PcktAlign), + "$res = OpReserveReadPipePackets $type $Pipe $NumPckts $PcktSize $PcktAlign">; +def OpReserveWritePipePackets : Op<279, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$NumPckts, ID:$PcktSize, ID:$PcktAlign), + "$res = OpReserveWritePipePackets $type $Pipe $NumPckts $PcktSize $PcktAlign">; +def OpCommitReadPipe : Op<280, (outs), (ins ID:$Pipe, ID:$ReserveId, ID:$PcktSize, ID:$PcktAlign), + "OpCommitReadPipe $Pipe $ReserveId $PcktSize $PcktAlign">; +def OpCommitWritePipe : Op<281, (outs), (ins ID:$Pipe, ID:$ReserveId, ID:$PcktSize, ID:$PcktAlign), + "OpCommitWritePipe $Pipe $ReserveId $PcktSize $PcktAlign">; +def OpIsValidReserveId : Op<282, (outs ID:$res), (ins TYPE:$type, ID:$ReserveId), + "$res = OpIsValidReserveId $type $ReserveId">; +def OpGetNumPipePackets : Op<283, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, ID:$PacketSize, ID:$PacketAlign), + "$res = OpGetNumPipePackets $type $Pipe $PacketSize $PacketAlign">; +def OpGetMaxPipePackets : Op<284, (outs ID:$res), (ins TYPE:$type, ID:$Pipe, 
ID:$PacketSize, ID:$PacketAlign), + "$res = OpGetMaxPipePackets $type $Pipe $PacketSize $PacketAlign">; +def OpGroupReserveReadPipePackets : Op<285, (outs ID:$res), (ins TYPE:$type, ID:$Scope, ID:$Pipe, ID:$NumPckts, ID:$PacketSize, ID:$PacketAlign), + "$res = OpGroupReserveReadPipePackets $type $Scope $Pipe $NumPckts $PacketSize $PacketAlign">; +def OpGroupReserveWritePipePackets : Op<286, (outs ID:$res), (ins TYPE:$type, ID:$Scope, ID:$Pipe, ID:$NumPckts, ID:$PacketSize, ID:$PacketAlign), + "$res = OpGroupReserveWritePipePackets $type $Scope $Pipe $NumPckts $PacketSize $PacketAlign">; +def OpGroupCommitReadPipe : Op<287, (outs), (ins ID:$Scope, ID:$Pipe, ID:$ReserveId, ID:$PacketSize, ID:$PacketAlign), + "OpGroupCommitReadPipe $Scope $Pipe $ReserveId $PacketSize $PacketAlign">; +def OpGroupCommitWritePipe : Op<288, (outs), (ins ID:$Scope, ID:$Pipe, ID:$ReserveId, ID:$PacketSize, ID:$PacketAlign), + "OpGroupCommitWritePipe $Scope $Pipe $ReserveId $PacketSize $PacketAlign">; // 3.42.24. 
Non-Uniform Instructions diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp index a7b2179a312e1..1aadd9df189a8 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -197,6 +197,8 @@ class SPIRVInstructionSelector : public InstructionSelector { bool selectOverflowArith(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, unsigned Opcode) const; + bool selectDebugTrap(Register ResVReg, const SPIRVType *ResType, + MachineInstr &I) const; bool selectIntegerDot(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, bool Signed) const; @@ -312,7 +314,8 @@ class SPIRVInstructionSelector : public InstructionSelector { MachineInstr &I) const; bool selectModf(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const; - + bool selectFrexp(Register ResVReg, const SPIRVType *ResType, + MachineInstr &I) const; // Utilities std::pair buildI32Constant(uint32_t Val, MachineInstr &I, @@ -833,6 +836,9 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg, case TargetOpcode::G_USUBSAT: return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat); + case TargetOpcode::G_FFREXP: + return selectFrexp(ResVReg, ResType, I); + case TargetOpcode::G_UADDO: return selectOverflowArith(ResVReg, ResType, I, ResType->getOpcode() == SPIRV::OpTypeVector @@ -999,16 +1005,26 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg, // represent code after lowering or intrinsics which are not implemented but // should not crash when found in a customer's LLVM IR input. 
case TargetOpcode::G_TRAP: - case TargetOpcode::G_DEBUGTRAP: case TargetOpcode::G_UBSANTRAP: case TargetOpcode::DBG_LABEL: return true; + case TargetOpcode::G_DEBUGTRAP: + return selectDebugTrap(ResVReg, ResType, I); default: return false; } } +bool SPIRVInstructionSelector::selectDebugTrap(Register ResVReg, + const SPIRVType *ResType, + MachineInstr &I) const { + unsigned Opcode = SPIRV::OpNop; + MachineBasicBlock &BB = *I.getParent(); + return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) + .constrainAllUses(TII, TRI, RBI); +} + bool SPIRVInstructionSelector::selectExtInst(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, @@ -1107,6 +1123,53 @@ bool SPIRVInstructionSelector::selectExtInstForLRound( return false; } +bool SPIRVInstructionSelector::selectFrexp(Register ResVReg, + const SPIRVType *ResType, + MachineInstr &I) const { + ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp}, + {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}}; + for (const auto &Ex : ExtInsts) { + SPIRV::InstructionSet::InstructionSet Set = Ex.first; + uint32_t Opcode = Ex.second; + if (!STI.canUseExtInstSet(Set)) + continue; + + MachineIRBuilder MIRBuilder(I); + SPIRVType *PointeeTy = GR.getSPIRVTypeForVReg(I.getOperand(1).getReg()); + const SPIRVType *PointerType = GR.getOrCreateSPIRVPointerType( + PointeeTy, MIRBuilder, SPIRV::StorageClass::Function); + Register PointerVReg = + createVirtualRegister(PointerType, &GR, MRI, MRI->getMF()); + + auto It = getOpVariableMBBIt(I); + auto MIB = BuildMI(*It->getParent(), It, It->getDebugLoc(), + TII.get(SPIRV::OpVariable)) + .addDef(PointerVReg) + .addUse(GR.getSPIRVTypeID(PointerType)) + .addImm(static_cast(SPIRV::StorageClass::Function)) + .constrainAllUses(TII, TRI, RBI); + + MIB = MIB & + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addImm(static_cast(Ex.first)) + .addImm(Opcode) + .add(I.getOperand(2)) + 
.addUse(PointerVReg) + .constrainAllUses(TII, TRI, RBI); + + MIB = MIB & + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad)) + .addDef(I.getOperand(1).getReg()) + .addUse(GR.getSPIRVTypeID(PointeeTy)) + .addUse(PointerVReg) + .constrainAllUses(TII, TRI, RBI); + return MIB; + } + return false; +} + bool SPIRVInstructionSelector::selectOpWithSrcs(Register ResVReg, const SPIRVType *ResType, MachineInstr &I, diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp index 27bb54c2d2e31..b4fc8dabbd4df 100644 --- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp @@ -290,6 +290,9 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) { // Control-flow. In some cases (e.g. constants) s1 may be promoted to s32. getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s32}); + getActionDefinitionsBuilder(G_FFREXP).legalForCartesianProduct( + allFloatScalarsAndVectors, {s32, v2s32, v3s32, v4s32, v8s32, v16s32}); + // TODO: Review the target OpenCL and GLSL Extended Instruction Set specs to // tighten these requirements. 
Many of these math functions are only legal on // specific bitwidths, so they are not selectable for diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp index a95f393b75605..bc159d5c9a113 100644 --- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp @@ -1222,6 +1222,31 @@ static void AddDotProductRequirements(const MachineInstr &MI, } } +void addPrintfRequirements(const MachineInstr &MI, + SPIRV::RequirementHandler &Reqs, + const SPIRVSubtarget &ST) { + SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry(); + const SPIRVType *PtrType = GR->getSPIRVTypeForVReg(MI.getOperand(4).getReg()); + if (PtrType) { + MachineOperand ASOp = PtrType->getOperand(1); + if (ASOp.isImm()) { + unsigned AddrSpace = ASOp.getImm(); + if (AddrSpace != SPIRV::StorageClass::UniformConstant) { + if (!ST.canUseExtension( + SPIRV::Extension:: + SPV_EXT_relaxed_printf_string_address_space)) { + report_fatal_error("SPV_EXT_relaxed_printf_string_address_space is " + "required because printf uses a format string not " + "in constant address space.", + false); + } + Reqs.addExtension( + SPIRV::Extension::SPV_EXT_relaxed_printf_string_address_space); + } + } + } +} + static bool isBFloat16Type(const SPIRVType *TypeDef) { return TypeDef && TypeDef->getNumOperands() == 3 && TypeDef->getOpcode() == SPIRV::OpTypeFloat && @@ -1230,8 +1255,9 @@ static bool isBFloat16Type(const SPIRVType *TypeDef) { } void addInstrRequirements(const MachineInstr &MI, - SPIRV::RequirementHandler &Reqs, + SPIRV::ModuleAnalysisInfo &MAI, const SPIRVSubtarget &ST) { + SPIRV::RequirementHandler &Reqs = MAI.Reqs; switch (MI.getOpcode()) { case SPIRV::OpMemoryModel: { int64_t Addr = MI.getOperand(0).getImm(); @@ -1321,6 +1347,12 @@ void addInstrRequirements(const MachineInstr &MI, static_cast( SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) { Reqs.addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info); + break; + } 
+ if (MI.getOperand(3).getImm() == + static_cast(SPIRV::OpenCLExtInst::printf)) { + addPrintfRequirements(MI, Reqs, ST); + break; } break; } @@ -1781,15 +1813,45 @@ void addInstrRequirements(const MachineInstr &MI, break; case SPIRV::OpConvertHandleToImageINTEL: case SPIRV::OpConvertHandleToSamplerINTEL: - case SPIRV::OpConvertHandleToSampledImageINTEL: + case SPIRV::OpConvertHandleToSampledImageINTEL: { if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bindless_images)) report_fatal_error("OpConvertHandleTo[Image/Sampler/SampledImage]INTEL " "instructions require the following SPIR-V extension: " "SPV_INTEL_bindless_images", false); + SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry(); + SPIRV::AddressingModel::AddressingModel AddrModel = MAI.Addr; + SPIRVType *TyDef = GR->getSPIRVTypeForVReg(MI.getOperand(1).getReg()); + if (MI.getOpcode() == SPIRV::OpConvertHandleToImageINTEL && + TyDef->getOpcode() != SPIRV::OpTypeImage) { + report_fatal_error("Incorrect return type for the instruction " + "OpConvertHandleToImageINTEL", + false); + } else if (MI.getOpcode() == SPIRV::OpConvertHandleToSamplerINTEL && + TyDef->getOpcode() != SPIRV::OpTypeSampler) { + report_fatal_error("Incorrect return type for the instruction " + "OpConvertHandleToSamplerINTEL", + false); + } else if (MI.getOpcode() == SPIRV::OpConvertHandleToSampledImageINTEL && + TyDef->getOpcode() != SPIRV::OpTypeSampledImage) { + report_fatal_error("Incorrect return type for the instruction " + "OpConvertHandleToSampledImageINTEL", + false); + } + SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(MI.getOperand(2).getReg()); + unsigned Bitwidth = GR->getScalarOrVectorBitWidth(SpvTy); + if (!(Bitwidth == 32 && AddrModel == SPIRV::AddressingModel::Physical32) && + !(Bitwidth == 64 && AddrModel == SPIRV::AddressingModel::Physical64)) { + report_fatal_error( + "Parameter value must be a 32-bit scalar in case of " + "Physical32 addressing model or a 64-bit scalar in case of " + "Physical64 addressing model", + 
false); + } Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bindless_images); Reqs.addCapability(SPIRV::Capability::BindlessImagesINTEL); break; + } case SPIRV::OpSubgroup2DBlockLoadINTEL: case SPIRV::OpSubgroup2DBlockLoadTransposeINTEL: case SPIRV::OpSubgroup2DBlockLoadTransformINTEL: @@ -1927,7 +1989,7 @@ static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI, continue; for (const MachineBasicBlock &MBB : *MF) for (const MachineInstr &MI : MBB) - addInstrRequirements(MI, MAI.Reqs, ST); + addInstrRequirements(MI, MAI, ST); } // Collect requirements for OpExecutionMode instructions. auto Node = M.getNamedMetadata("spirv.ExecutionMode"); diff --git a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp index 2b34f61fa2434..4e4e6fb4ab791 100644 --- a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp @@ -335,6 +335,21 @@ static void lowerFunnelShifts(IntrinsicInst *FSHIntrinsic) { FSHIntrinsic->setCalledFunction(FSHFunc); } +static void lowerConstrainedFPCmpIntrinsic( + ConstrainedFPCmpIntrinsic *ConstrainedCmpIntrinsic, + SmallVector &EraseFromParent) { + if (!ConstrainedCmpIntrinsic) + return; + // Extract the floating-point values being compared + Value *LHS = ConstrainedCmpIntrinsic->getArgOperand(0); + Value *RHS = ConstrainedCmpIntrinsic->getArgOperand(1); + FCmpInst::Predicate Pred = ConstrainedCmpIntrinsic->getPredicate(); + IRBuilder<> Builder(ConstrainedCmpIntrinsic); + Value *FCmp = Builder.CreateFCmp(Pred, LHS, RHS); + ConstrainedCmpIntrinsic->replaceAllUsesWith(FCmp); + EraseFromParent.push_back(dyn_cast(ConstrainedCmpIntrinsic)); +} + static void lowerExpectAssume(IntrinsicInst *II) { // If we cannot use the SPV_KHR_expect_assume extension, then we need to // ignore the intrinsic and move on. It should be removed later on by LLVM. 
@@ -376,6 +391,7 @@ static bool toSpvLifetimeIntrinsic(IntrinsicInst *II, Intrinsic::ID NewID) { bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) { bool Changed = false; const SPIRVSubtarget &STI = TM.getSubtarget(*F); + SmallVector EraseFromParent; for (BasicBlock &BB : *F) { for (Instruction &I : make_early_inc_range(BB)) { auto Call = dyn_cast(&I); @@ -423,9 +439,17 @@ bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) { lowerPtrAnnotation(II); Changed = true; break; + case Intrinsic::experimental_constrained_fcmp: + case Intrinsic::experimental_constrained_fcmps: + lowerConstrainedFPCmpIntrinsic(dyn_cast(II), + EraseFromParent); + Changed = true; + break; } } } + for (auto *I : EraseFromParent) + I->eraseFromParent(); return Changed; } diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp index 253b737ce2290..a8908d4b710e6 100644 --- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -1247,7 +1247,7 @@ class X86AsmParser : public MCTargetAsmParser { /// return false if no parsing errors occurred, true otherwise. bool HandleAVX512Operand(OperandVector &Operands); - bool ParseZ(std::unique_ptr &Z, const SMLoc &StartLoc); + bool ParseZ(std::unique_ptr &Z, SMLoc StartLoc); bool is64BitMode() const { // FIXME: Can tablegen auto-generate this? @@ -2907,8 +2907,7 @@ X86::CondCode X86AsmParser::ParseConditionCode(StringRef CC) { // true on failure, false otherwise // If no {z} mark was found - Parser doesn't advance -bool X86AsmParser::ParseZ(std::unique_ptr &Z, - const SMLoc &StartLoc) { +bool X86AsmParser::ParseZ(std::unique_ptr &Z, SMLoc StartLoc) { MCAsmParser &Parser = getParser(); // Assuming we are just pass the '{' mark, quering the next token // Searched for {z}, but none was found. 
Return false, as no parsing error was diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp index ee9760f881ae9..143c4c43e611a 100644 --- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp +++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp @@ -905,6 +905,7 @@ bool X86LegalizerInfo::legalizeSETROUNDING(MachineInstr &MI, int FieldVal = X86::getRoundingModeX86(RM); if (FieldVal == X86::rmInvalid) { + FieldVal = X86::rmToNearest; LLVMContext &C = MF.getFunction().getContext(); C.diagnose(DiagnosticInfoUnsupported( MF.getFunction(), "rounding mode is not supported by X86 hardware", diff --git a/llvm/lib/Target/X86/X86FixupInstTuning.cpp b/llvm/lib/Target/X86/X86FixupInstTuning.cpp index 33dc0a232815c..a1d4e0bc62310 100644 --- a/llvm/lib/Target/X86/X86FixupInstTuning.cpp +++ b/llvm/lib/Target/X86/X86FixupInstTuning.cpp @@ -277,6 +277,22 @@ bool X86FixupInstTuningPass::processInstruction( return true; }; + // Is ADD(X,X) more efficient than SHL(X,1)? 
+ auto ProcessShiftLeftToAdd = [&](unsigned AddOpc) -> bool { + if (MI.getOperand(NumOperands - 1).getImm() != 1) + return false; + if (!NewOpcPreferable(AddOpc, /*ReplaceInTie*/ true)) + return false; + LLVM_DEBUG(dbgs() << "Replacing: " << MI); + { + MI.setDesc(TII->get(AddOpc)); + MI.removeOperand(NumOperands - 1); + MI.addOperand(MI.getOperand(NumOperands - 2)); + } + LLVM_DEBUG(dbgs() << " With: " << MI); + return false; + }; + switch (Opc) { case X86::BLENDPDrri: return ProcessBLENDToMOV(X86::MOVSDrr, 0x3, 0x1); @@ -563,6 +579,44 @@ bool X86FixupInstTuningPass::processInstruction( return ProcessUNPCKPS(X86::VPUNPCKHDQZ256rmkz); case X86::VUNPCKHPSZrmkz: return ProcessUNPCKPS(X86::VPUNPCKHDQZrmkz); + + case X86::PSLLWri: + return ProcessShiftLeftToAdd(X86::PADDWrr); + case X86::VPSLLWri: + return ProcessShiftLeftToAdd(X86::VPADDWrr); + case X86::VPSLLWYri: + return ProcessShiftLeftToAdd(X86::VPADDWYrr); + case X86::VPSLLWZ128ri: + return ProcessShiftLeftToAdd(X86::VPADDWZ128rr); + case X86::VPSLLWZ256ri: + return ProcessShiftLeftToAdd(X86::VPADDWZ256rr); + case X86::VPSLLWZri: + return ProcessShiftLeftToAdd(X86::VPADDWZrr); + case X86::PSLLDri: + return ProcessShiftLeftToAdd(X86::PADDDrr); + case X86::VPSLLDri: + return ProcessShiftLeftToAdd(X86::VPADDDrr); + case X86::VPSLLDYri: + return ProcessShiftLeftToAdd(X86::VPADDDYrr); + case X86::VPSLLDZ128ri: + return ProcessShiftLeftToAdd(X86::VPADDDZ128rr); + case X86::VPSLLDZ256ri: + return ProcessShiftLeftToAdd(X86::VPADDDZ256rr); + case X86::VPSLLDZri: + return ProcessShiftLeftToAdd(X86::VPADDDZrr); + case X86::PSLLQri: + return ProcessShiftLeftToAdd(X86::PADDQrr); + case X86::VPSLLQri: + return ProcessShiftLeftToAdd(X86::VPADDQrr); + case X86::VPSLLQYri: + return ProcessShiftLeftToAdd(X86::VPADDQYrr); + case X86::VPSLLQZ128ri: + return ProcessShiftLeftToAdd(X86::VPADDQZ128rr); + case X86::VPSLLQZ256ri: + return ProcessShiftLeftToAdd(X86::VPADDQZ256rr); + case X86::VPSLLQZri: + return 
ProcessShiftLeftToAdd(X86::VPADDQZrr); + default: return false; } diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 80fbcaa562032..fcfeb661aa891 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -4452,11 +4452,12 @@ static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG, template SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget, const SDLoc &DL, EVT VT, ArrayRef Ops, - F Builder, bool CheckBWI = true) { + F Builder, bool CheckBWI = true, + bool AllowAVX512 = true) { assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2"); unsigned NumSubs = 1; if ((CheckBWI && Subtarget.useBWIRegs()) || - (!CheckBWI && Subtarget.useAVX512Regs())) { + (!CheckBWI && AllowAVX512 && Subtarget.useAVX512Regs())) { if (VT.getSizeInBits() > 512) { NumSubs = VT.getSizeInBits() / 512; assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size"); @@ -28714,11 +28715,11 @@ SDValue X86TargetLowering::LowerSET_ROUNDING(SDValue Op, int FieldVal = X86::getRoundingModeX86(RM); if (FieldVal == X86::rmInvalid) { + FieldVal = X86::rmToNearest; LLVMContext &C = MF.getFunction().getContext(); C.diagnose(DiagnosticInfoUnsupported( MF.getFunction(), "rounding mode is not supported by X86 hardware", DiagnosticLocation(DL.getDebugLoc()), DS_Error)); - return {}; } RMBits = DAG.getConstant(FieldVal, DL, MVT::i16); } else { @@ -30312,22 +30313,8 @@ static SDValue LowerShiftByScalarImmediate(SDValue Op, SelectionDAG &DAG, uint64_t ShiftAmt = APIntShiftAmt.getZExtValue(); - if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) { - // Hardware support for vector shifts is sparse which makes us scalarize the - // vector operations in many cases. 
Also, on sandybridge ADD is faster than - // shl: (shl V, 1) -> (add (freeze V), (freeze V)) - if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) { - // R may be undef at run-time, but (shl R, 1) must be an even number (LSB - // must be 0). (add undef, undef) however can be any value. To make this - // safe, we must freeze R to ensure that register allocation uses the same - // register for an undefined value. This ensures that the result will - // still be even and preserves the original semantics. - R = DAG.getFreeze(R); - return DAG.getNode(ISD::ADD, dl, VT, R, R); - } - + if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG); - } // i64 SRA needs to be performed as partial shifts. if (((!Subtarget.hasXOP() && VT == MVT::v2i64) || @@ -45185,10 +45172,13 @@ bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode( case X86ISD::WrapperRIP: return true; case X86ISD::BLENDI: + case X86ISD::PSHUFB: case X86ISD::PSHUFD: case X86ISD::UNPCKL: case X86ISD::UNPCKH: + case X86ISD::VPERMILPV: case X86ISD::VPERMILPI: + case X86ISD::VPERMV: case X86ISD::VPERMV3: { SmallVector Mask; SmallVector Ops; @@ -45251,10 +45241,13 @@ bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode( case X86ISD::BLENDV: return false; // SSE target shuffles. + case X86ISD::PSHUFB: case X86ISD::PSHUFD: case X86ISD::UNPCKL: case X86ISD::UNPCKH: + case X86ISD::VPERMILPV: case X86ISD::VPERMILPI: + case X86ISD::VPERMV: case X86ISD::VPERMV3: return false; // SSE comparisons handle all icmp/fcmp cases. 
@@ -58070,7 +58063,8 @@ static SDValue matchVPMADD52(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, }; return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Acc, X, Y}, VPMADD52Builder, - /*CheckBWI*/ false); + /*CheckBWI*/ false, + /*AllowAVX512*/ Subtarget.hasIFMA()); } static SDValue combineAdd(SDNode *N, SelectionDAG &DAG, diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td index b8f299965faa3..2371ed4ed14a1 100644 --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -3238,6 +3238,7 @@ multiclass avx512_load opc, string OpcodeStr, string Name, (_.VT _.RC:$src1), (_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K, Sched<[Sched.RR]>; + let mayLoad = 1, canFoldAsLoad = 1 in def rmk : AVX512PI opc, string OpcodeStr, string Name, (_.VT _.RC:$src0))))], _.ExeDomain>, EVEX, EVEX_K, Sched<[Sched.RM]>; } + let mayLoad = 1, canFoldAsLoad = 1 in def rmkz : AVX512PI= 3) { + Register MaskReg; + const MachineOperand &Op1 = LoadMI.getOperand(1); + const MachineOperand &Op2 = LoadMI.getOperand(2); + + auto IsVKWMClass = [](const TargetRegisterClass *RC) { + return RC == &X86::VK2WMRegClass || RC == &X86::VK4WMRegClass || + RC == &X86::VK8WMRegClass || RC == &X86::VK16WMRegClass || + RC == &X86::VK32WMRegClass || RC == &X86::VK64WMRegClass; + }; + + if (Op1.isReg() && IsVKWMClass(getRegClass(MCID, 1, &RI))) + MaskReg = Op1.getReg(); + else if (Op2.isReg() && IsVKWMClass(getRegClass(MCID, 2, &RI))) + MaskReg = Op2.getReg(); + + if (MaskReg) { + bool HasSameMask = false; + for (unsigned I = 1, E = MI.getDesc().getNumOperands(); I < E; ++I) { + const MachineOperand &Op = MI.getOperand(I); + if (Op.isReg() && Op.getReg() == MaskReg) { + HasSameMask = true; + break; + } + } + if (!HasSameMask) + return nullptr; + } + } + // TODO: Support the case where LoadMI loads a wide register, but MI // only uses a subreg. 
for (auto Op : Ops) { @@ -8121,7 +8154,6 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl( } // If loading from a FrameIndex, fold directly from the FrameIndex. - unsigned NumOps = LoadMI.getDesc().getNumOperands(); int FrameIndex; if (isLoadFromStackSlot(LoadMI, FrameIndex)) { if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF)) diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 04f787aae11aa..cf6d0ecab4f69 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -64,6 +64,7 @@ #include "llvm/Support/KnownBits.h" #include "llvm/Support/KnownFPClass.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/TypeSize.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/InstCombine/InstCombiner.h" #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" @@ -2405,6 +2406,22 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { matchBSwapOrBitReverse(*II, /*MatchBSwaps*/ true, /*MatchBitReversals*/ true)) return BitOp; + + // R = fshl(X, X, C2) + // fshl(R, R, C1) --> fshl(X, X, (C1 + C2) % bitsize) + Value *InnerOp; + const APInt *ShAmtInnerC, *ShAmtOuterC; + if (match(Op0, m_FShl(m_Value(InnerOp), m_Deferred(InnerOp), + m_APInt(ShAmtInnerC))) && + match(ShAmtC, m_APInt(ShAmtOuterC)) && Op0 == Op1) { + APInt Sum = *ShAmtOuterC + *ShAmtInnerC; + APInt Modulo = Sum.urem(APInt(Sum.getBitWidth(), BitWidth)); + if (Modulo.isZero()) + return replaceInstUsesWith(*II, InnerOp); + Constant *ModuloC = ConstantInt::get(Ty, Modulo); + return CallInst::Create(cast(Op0)->getCalledFunction(), + {InnerOp, InnerOp, ModuloC}); + } } // fshl(X, X, Neg(Y)) --> fshr(X, X, Y) @@ -3765,6 +3782,17 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) { return replaceInstUsesWith(CI, Res); } } + + // vector.reduce.add.vNiM(splat(%x)) -> mul(%x, N) + if (Value *Splat = getSplatValue(Arg)) { + ElementCount 
VecToReduceCount = + cast(Arg->getType())->getElementCount(); + if (VecToReduceCount.isFixed()) { + unsigned VectorSize = VecToReduceCount.getFixedValue(); + return BinaryOperator::CreateMul( + Splat, ConstantInt::get(Splat->getType(), VectorSize)); + } + } } [[fallthrough]]; } diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index a20e0dec8841b..cdae9a7271915 100644 --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -1786,6 +1786,25 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, else NumInstrumentedReads++; + if (O.MaybeByteOffset) { + Type *Ty = Type::getInt8Ty(*C); + IRBuilder IB(O.getInsn()); + + Value *OffsetOp = O.MaybeByteOffset; + if (TargetTriple.isRISCV()) { + Type *OffsetTy = OffsetOp->getType(); + // RVV indexed loads/stores zero-extend offset operands which are narrower + // than XLEN to XLEN. 
+ if (OffsetTy->getScalarType()->getIntegerBitWidth() < + static_cast(LongSize)) { + VectorType *OrigType = cast(OffsetTy); + Type *ExtendTy = VectorType::get(IntptrTy, OrigType); + OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy); + } + } + Addr = IB.CreateGEP(Ty, Addr, {OffsetOp}); + } + unsigned Granularity = 1 << Mapping.Scale; if (O.MaybeMask) { instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL, diff --git a/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp b/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp index c215228b480d2..89980d54ee897 100644 --- a/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp +++ b/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "llvm/Transforms/Scalar/DropUnnecessaryAssumes.h" +#include "llvm/ADT/SetVector.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/IntrinsicInst.h" @@ -17,13 +18,48 @@ using namespace llvm; using namespace llvm::PatternMatch; static bool affectedValuesAreEphemeral(ArrayRef Affected) { - // If all the affected uses have only one use (part of the assume), then - // the assume does not provide useful information. Note that additional - // users may appear as a result of inlining and CSE, so we should only - // make this assumption late in the optimization pipeline. - // TODO: Handle dead cyclic usages. - // TODO: Handle multiple dead assumes on the same value. - return all_of(Affected, match_fn(m_OneUse(m_Value()))); + // Check whether all the uses are ephemeral, i.e. recursively only used + // by assumes. In that case, the assume does not provide useful information. + // Note that additional users may appear as a result of inlining and CSE, + // so we should only make this assumption late in the optimization pipeline. 
+ SmallSetVector Worklist; + auto AddUsers = [&](Value *V) { + for (User *U : V->users()) { + // Bail out if we need to inspect too many users. + if (Worklist.size() >= 32) + return false; + Worklist.insert(cast(U)); + } + return true; + }; + + for (Value *V : Affected) { + // Do not handle assumes on globals for now. The use list for them may + // contain uses in other functions. + if (!isa(V)) + return false; + + if (!AddUsers(V)) + return false; + } + + for (unsigned Idx = 0; Idx < Worklist.size(); ++Idx) { + Instruction *I = Worklist[Idx]; + + // Use in assume is ephemeral. + if (isa(I)) + continue; + + // Use in side-effecting instruction is non-ephemeral. + if (I->mayHaveSideEffects() || I->isTerminator()) + return false; + + // Otherwise, recursively look at the users. + if (!AddUsers(I)) + return false; + } + + return true; } PreservedAnalyses diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp index 5e719c6c8cbb7..2d84b4ae1ba5c 100644 --- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp @@ -84,6 +84,7 @@ #include #include #include +#include #include #include #include @@ -6329,9 +6330,12 @@ static bool initializeUniqueCases(SwitchInst *SI, PHINode *&PHI, // Helper function that checks if it is possible to transform a switch with only // two cases (or two cases + default) that produces a result into a select. // TODO: Handle switches with more than 2 cases that map to the same result. +// The branch weights correspond to the provided Condition (i.e. 
if Condition is +// modified from the original SwitchInst, the caller must adjust the weights) static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Constant *DefaultResult, Value *Condition, - IRBuilder<> &Builder, const DataLayout &DL) { + IRBuilder<> &Builder, const DataLayout &DL, + ArrayRef BranchWeights) { // If we are selecting between only two cases transform into a simple // select or a two-way select if default is possible. // Example: @@ -6340,6 +6344,10 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, // case 20: return 2; ----> %2 = icmp eq i32 %a, 20 // default: return 4; %3 = select i1 %2, i32 2, i32 %1 // } + + const bool HasBranchWeights = + !BranchWeights.empty() && !ProfcheckDisableMetadataFixes; + if (ResultVector.size() == 2 && ResultVector[0].second.size() == 1 && ResultVector[1].second.size() == 1) { ConstantInt *FirstCase = ResultVector[0].second[0]; @@ -6348,13 +6356,37 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, if (DefaultResult) { Value *ValueCompare = Builder.CreateICmpEQ(Condition, SecondCase, "switch.selectcmp"); - SelectValue = Builder.CreateSelect(ValueCompare, ResultVector[1].first, - DefaultResult, "switch.select"); + SelectInst *SelectValueInst = cast(Builder.CreateSelect( + ValueCompare, ResultVector[1].first, DefaultResult, "switch.select")); + SelectValue = SelectValueInst; + if (HasBranchWeights) { + // We start with 3 probabilities, where the numerator is the + // corresponding BranchWeights[i], and the denominator is the sum over + // BranchWeights. We want the probability and negative probability of + // Condition == SecondCase. 
+ assert(BranchWeights.size() == 3); + setBranchWeights(SelectValueInst, BranchWeights[2], + BranchWeights[0] + BranchWeights[1], + /*IsExpected=*/false); + } } Value *ValueCompare = Builder.CreateICmpEQ(Condition, FirstCase, "switch.selectcmp"); - return Builder.CreateSelect(ValueCompare, ResultVector[0].first, - SelectValue, "switch.select"); + SelectInst *Ret = cast(Builder.CreateSelect( + ValueCompare, ResultVector[0].first, SelectValue, "switch.select")); + if (HasBranchWeights) { + // We may have had a DefaultResult. Base the position of the first and + // second's branch weights accordingly. Also the proability that Condition + // != FirstCase needs to take that into account. + assert(BranchWeights.size() >= 2); + size_t FirstCasePos = (Condition != nullptr); + size_t SecondCasePos = FirstCasePos + 1; + uint32_t DefaultCase = (Condition != nullptr) ? BranchWeights[0] : 0; + setBranchWeights(Ret, BranchWeights[FirstCasePos], + DefaultCase + BranchWeights[SecondCasePos], + /*IsExpected=*/false); + } + return Ret; } // Handle the degenerate case where two cases have the same result value. @@ -6390,8 +6422,16 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Value *And = Builder.CreateAnd(Condition, AndMask); Value *Cmp = Builder.CreateICmpEQ( And, Constant::getIntegerValue(And->getType(), AndMask)); - return Builder.CreateSelect(Cmp, ResultVector[0].first, - DefaultResult); + SelectInst *Ret = cast( + Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult)); + if (HasBranchWeights) { + // We know there's a Default case. We base the resulting branch + // weights off its probability. 
+ assert(BranchWeights.size() >= 2); + setBranchWeights(Ret, accumulate(drop_begin(BranchWeights), 0), + BranchWeights[0], /*IsExpected=*/false); + } + return Ret; } } @@ -6408,7 +6448,14 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Value *And = Builder.CreateAnd(Condition, ~BitMask, "switch.and"); Value *Cmp = Builder.CreateICmpEQ( And, Constant::getNullValue(And->getType()), "switch.selectcmp"); - return Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult); + SelectInst *Ret = cast( + Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult)); + if (HasBranchWeights) { + assert(BranchWeights.size() >= 2); + setBranchWeights(Ret, accumulate(drop_begin(BranchWeights), 0), + BranchWeights[0], /*IsExpected=*/false); + } + return Ret; } } @@ -6419,7 +6466,14 @@ static Value *foldSwitchToSelect(const SwitchCaseResultVectorTy &ResultVector, Value *Cmp2 = Builder.CreateICmpEQ(Condition, CaseValues[1], "switch.selectcmp.case2"); Value *Cmp = Builder.CreateOr(Cmp1, Cmp2, "switch.selectcmp"); - return Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult); + SelectInst *Ret = cast( + Builder.CreateSelect(Cmp, ResultVector[0].first, DefaultResult)); + if (HasBranchWeights) { + assert(BranchWeights.size() >= 2); + setBranchWeights(Ret, accumulate(drop_begin(BranchWeights), 0), + BranchWeights[0], /*IsExpected=*/false); + } + return Ret; } } @@ -6480,8 +6534,18 @@ static bool trySwitchToSelect(SwitchInst *SI, IRBuilder<> &Builder, assert(PHI != nullptr && "PHI for value select not found"); Builder.SetInsertPoint(SI); - Value *SelectValue = - foldSwitchToSelect(UniqueResults, DefaultResult, Cond, Builder, DL); + SmallVector BranchWeights; + if (!ProfcheckDisableMetadataFixes) { + [[maybe_unused]] auto HasWeights = + extractBranchWeights(getBranchWeightMDNode(*SI), BranchWeights); + assert(!HasWeights == (BranchWeights.empty())); + } + assert(BranchWeights.empty() || + (BranchWeights.size() >= + UniqueResults.size() 
+ (DefaultResult != nullptr))); + + Value *SelectValue = foldSwitchToSelect(UniqueResults, DefaultResult, Cond, + Builder, DL, BranchWeights); if (!SelectValue) return false; diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index a2d61d689397b..ab5c9c99b9448 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -2438,8 +2438,9 @@ struct CSEDenseMapInfo { } // end anonymous namespace -///Perform cse of induction variable instructions. -static void cse(BasicBlock *BB) { +/// FIXME: This legacy common-subexpression-elimination routine is scheduled for +/// removal, in favor of the VPlan-based one. +static void legacyCSE(BasicBlock *BB) { // Perform simple cse. SmallDenseMap CSEMap; for (Instruction &In : llvm::make_early_inc_range(*BB)) { @@ -2543,7 +2544,7 @@ void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { BasicBlock *HeaderBB = State.CFG.VPBB2IRBB[HeaderVPBB]; // Remove redundant induction instructions. - cse(HeaderBB); + legacyCSE(HeaderBB); } void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { @@ -3901,7 +3902,8 @@ void LoopVectorizationPlanner::emitInvalidCostRemarks( if (VF.isScalar()) continue; - VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, + *CM.PSE.getSE()); precomputeCosts(*Plan, VF, CostCtx); auto Iter = vp_depth_first_deep(Plan->getVectorLoopRegion()->getEntry()); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly(Iter)) { @@ -4158,7 +4160,8 @@ VectorizationFactor LoopVectorizationPlanner::selectVectorizationFactor() { // Add on other costs that are modelled in VPlan, but not in the legacy // cost model. 
- VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, *P, CM, CM.CostKind, + *CM.PSE.getSE()); VPRegionBlock *VectorRegion = P->getVectorLoopRegion(); assert(VectorRegion && "Expected to have a vector region!"); for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly( @@ -6833,7 +6836,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF, InstructionCost LoopVectorizationPlanner::cost(VPlan &Plan, ElementCount VF) const { - VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, Plan, CM, CM.CostKind, *PSE.getSE()); InstructionCost Cost = precomputeCosts(Plan, VF, CostCtx); // Now compute and add the VPlan-based cost. @@ -7066,7 +7069,8 @@ VectorizationFactor LoopVectorizationPlanner::computeBestVF() { // simplifications not accounted for in the legacy cost model. If that's the // case, don't trigger the assertion, as the extra simplifications may cause a // different VF to be picked by the VPlan-based cost model. - VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, BestPlan, CM, CM.CostKind, + *CM.PSE.getSE()); precomputeCosts(BestPlan, BestFactor.Width, CostCtx); // Verify that the VPlan-based and legacy cost models agree, except for VPlans // with early exits and plans with additional VPlan simplifications. 
The @@ -7207,7 +7211,6 @@ DenseMap LoopVectorizationPlanner::executePlan( VPlanTransforms::narrowInterleaveGroups( BestVPlan, BestVF, TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)); - VPlanTransforms::cse(BestVPlan); VPlanTransforms::removeDeadRecipes(BestVPlan); VPlanTransforms::convertToConcreteRecipes(BestVPlan); @@ -7221,6 +7224,7 @@ DenseMap LoopVectorizationPlanner::executePlan( BestVPlan, VectorPH, CM.foldTailByMasking(), CM.requiresScalarEpilogue(BestVF.isVector())); VPlanTransforms::materializeVFAndVFxUF(BestVPlan, VectorPH, BestVF); + VPlanTransforms::cse(BestVPlan); VPlanTransforms::simplifyRecipes(BestVPlan); // 0. Generate SCEV-dependent code in the entry, including TripCount, before @@ -8596,7 +8600,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes( // TODO: Enable following transform when the EVL-version of extended-reduction // and mulacc-reduction are implemented. if (!CM.foldTailWithEVL()) { - VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind); + VPCostContext CostCtx(CM.TTI, *CM.TLI, *Plan, CM, CM.CostKind, + *CM.PSE.getSE()); VPlanTransforms::runPass(VPlanTransforms::convertToAbstractRecipes, *Plan, CostCtx, Range); } @@ -10053,7 +10058,7 @@ bool LoopVectorizePass::processLoop(Loop *L) { bool ForceVectorization = Hints.getForce() == LoopVectorizeHints::FK_Enabled; VPCostContext CostCtx(CM.TTI, *CM.TLI, LVP.getPlanFor(VF.Width), CM, - CM.CostKind); + CM.CostKind, *CM.PSE.getSE()); if (!ForceVectorization && !isOutsideLoopWorkProfitable(Checks, VF, L, PSE, CostCtx, LVP.getPlanFor(VF.Width), SEL, diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp index a1c6f7977885f..728d29107808d 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp @@ -845,19 +845,10 @@ InstructionCost VPRegionBlock::cost(ElementCount VF, VPCostContext &Ctx) { if (VF.isScalable()) return InstructionCost::getInvalid(); - // First compute the 
cost of the conditionally executed recipes, followed by - // account for the branching cost, except if the mask is a header mask or - // uniform condition. - using namespace llvm::VPlanPatternMatch; + // Compute and return the cost of the conditionally executed recipes. + assert(VF.isVector() && "Can only compute vector cost at the moment."); VPBasicBlock *Then = cast(getEntry()->getSuccessors()[0]); - InstructionCost ThenCost = Then->cost(VF, Ctx); - - // For the scalar case, we may not always execute the original predicated - // block, Thus, scale the block's cost by the probability of executing it. - if (VF.isScalar()) - return ThenCost / getPredBlockCostDivisor(Ctx.CostKind); - - return ThenCost; + return Then->cost(VF, Ctx); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) @@ -1759,7 +1750,8 @@ VPCostContext::getOperandInfo(VPValue *V) const { } InstructionCost VPCostContext::getScalarizationOverhead( - Type *ResultTy, ArrayRef Operands, ElementCount VF) { + Type *ResultTy, ArrayRef Operands, ElementCount VF, + bool AlwaysIncludeReplicatingR) { if (VF.isScalar()) return 0; @@ -1779,7 +1771,9 @@ InstructionCost VPCostContext::getScalarizationOverhead( SmallPtrSet UniqueOperands; SmallVector Tys; for (auto *Op : Operands) { - if (Op->isLiveIn() || isa(Op) || + if (Op->isLiveIn() || + (!AlwaysIncludeReplicatingR && + isa(Op)) || !UniqueOperands.insert(Op).second) continue; Tys.push_back(toVectorizedTy(Types.inferScalarType(Op), VF)); diff --git a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h index fe59774b7c838..2a8baec74b72b 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h +++ b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h @@ -349,12 +349,14 @@ struct VPCostContext { LoopVectorizationCostModel &CM; SmallPtrSet SkipCostComputation; TargetTransformInfo::TargetCostKind CostKind; + ScalarEvolution &SE; VPCostContext(const TargetTransformInfo &TTI, const TargetLibraryInfo &TLI, const VPlan &Plan, 
LoopVectorizationCostModel &CM, - TargetTransformInfo::TargetCostKind CostKind) + TargetTransformInfo::TargetCostKind CostKind, + ScalarEvolution &SE) : TTI(TTI), TLI(TLI), Types(Plan), LLVMCtx(Plan.getContext()), CM(CM), - CostKind(CostKind) {} + CostKind(CostKind), SE(SE) {} /// Return the cost for \p UI with \p VF using the legacy cost model as /// fallback until computing the cost of all recipes migrates to VPlan. @@ -374,10 +376,12 @@ struct VPCostContext { /// Estimate the overhead of scalarizing a recipe with result type \p ResultTy /// and \p Operands with \p VF. This is a convenience wrapper for the - /// type-based getScalarizationOverhead API. - InstructionCost getScalarizationOverhead(Type *ResultTy, - ArrayRef Operands, - ElementCount VF); + /// type-based getScalarizationOverhead API. If \p AlwaysIncludeReplicatingR + /// is true, always compute the cost of scalarizing replicating operands. + InstructionCost + getScalarizationOverhead(Type *ResultTy, ArrayRef Operands, + ElementCount VF, + bool AlwaysIncludeReplicatingR = false); }; /// This class can be used to assign names to VPValues. For VPValues without diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 8892f9b098349..b5e30cb1fa655 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -1214,6 +1214,7 @@ bool VPInstruction::opcodeMayReadOrWriteFromMemory() const { case Instruction::Select: case Instruction::PHI: case VPInstruction::AnyOf: + case VPInstruction::Broadcast: case VPInstruction::BuildStructVector: case VPInstruction::BuildVector: case VPInstruction::CalculateTripCountMinusVF: @@ -3068,6 +3069,61 @@ bool VPReplicateRecipe::shouldPack() const { }); } +/// Returns true if \p Ptr is a pointer computation for which the legacy cost +/// model computes a SCEV expression when computing the address cost. 
+static bool shouldUseAddressAccessSCEV(const VPValue *Ptr) { + auto *PtrR = Ptr->getDefiningRecipe(); + if (!PtrR || !((isa(PtrR) && + cast(PtrR)->getOpcode() == + Instruction::GetElementPtr) || + isa(PtrR))) + return false; + + // We are looking for a GEP where all indices are either loop invariant or + // inductions. + for (VPValue *Opd : drop_begin(PtrR->operands())) { + if (!Opd->isDefinedOutsideLoopRegions() && + !isa(Opd)) + return false; + } + + return true; +} + +/// Returns true if \p V is used as part of the address of another load or +/// store. +static bool isUsedByLoadStoreAddress(const VPUser *V) { + SmallPtrSet Seen; + SmallVector WorkList = {V}; + + while (!WorkList.empty()) { + auto *Cur = dyn_cast(WorkList.pop_back_val()); + if (!Cur || !Seen.insert(Cur).second) + continue; + + for (VPUser *U : Cur->users()) { + if (auto *InterleaveR = dyn_cast(U)) + if (InterleaveR->getAddr() == Cur) + return true; + if (auto *RepR = dyn_cast(U)) { + if (RepR->getOpcode() == Instruction::Load && + RepR->getOperand(0) == Cur) + return true; + if (RepR->getOpcode() == Instruction::Store && + RepR->getOperand(1) == Cur) + return true; + } + if (auto *MemR = dyn_cast(U)) { + if (MemR->getAddr() == Cur && MemR->isConsecutive()) + return true; + } + } + + append_range(WorkList, cast(Cur)->users()); + } + return false; +} + InstructionCost VPReplicateRecipe::computeCost(ElementCount VF, VPCostContext &Ctx) const { Instruction *UI = cast(getUnderlyingValue()); @@ -3175,21 +3231,58 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF, } case Instruction::Load: case Instruction::Store: { - if (isSingleScalar()) { - bool IsLoad = UI->getOpcode() == Instruction::Load; - Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0)); - Type *ScalarPtrTy = Ctx.Types.inferScalarType(getOperand(IsLoad ? 
0 : 1)); - const Align Alignment = getLoadStoreAlignment(UI); - unsigned AS = getLoadStoreAddressSpace(UI); - TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0)); - InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost( - UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo, UI); - return ScalarMemOpCost + Ctx.TTI.getAddressComputationCost( - ScalarPtrTy, nullptr, nullptr, Ctx.CostKind); - } + if (VF.isScalable() && !isSingleScalar()) + return InstructionCost::getInvalid(); + // TODO: See getMemInstScalarizationCost for how to handle replicating and // predicated cases. - break; + const VPRegionBlock *ParentRegion = getParent()->getParent(); + if (ParentRegion && ParentRegion->isReplicator()) + break; + + bool IsLoad = UI->getOpcode() == Instruction::Load; + const VPValue *PtrOp = getOperand(!IsLoad); + // TODO: Handle cases where we need to pass a SCEV to + // getAddressComputationCost. + if (shouldUseAddressAccessSCEV(PtrOp)) + break; + + Type *ValTy = Ctx.Types.inferScalarType(IsLoad ? this : getOperand(0)); + Type *ScalarPtrTy = Ctx.Types.inferScalarType(PtrOp); + const Align Alignment = getLoadStoreAlignment(UI); + unsigned AS = getLoadStoreAddressSpace(UI); + TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(UI->getOperand(0)); + InstructionCost ScalarMemOpCost = Ctx.TTI.getMemoryOpCost( + UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo); + + Type *PtrTy = isSingleScalar() ? ScalarPtrTy : toVectorTy(ScalarPtrTy, VF); + + InstructionCost ScalarCost = + ScalarMemOpCost + Ctx.TTI.getAddressComputationCost( + PtrTy, &Ctx.SE, nullptr, Ctx.CostKind); + if (isSingleScalar()) + return ScalarCost; + + SmallVector OpsToScalarize; + Type *ResultTy = Type::getVoidTy(PtrTy->getContext()); + // Set ResultTy and OpsToScalarize, if scalarization is needed. 
Currently we + // don't assign scalarization overhead in general, if the target prefers + // vectorized addressing or the loaded value is used as part of an address + // of another load or store. + bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing(); + if (PreferVectorizedAddressing || !isUsedByLoadStoreAddress(this)) { + bool EfficientVectorLoadStore = + Ctx.TTI.supportsEfficientVectorElementLoadStore(); + if (!(IsLoad && !PreferVectorizedAddressing) && + !(!IsLoad && EfficientVectorLoadStore)) + append_range(OpsToScalarize, operands()); + + if (!EfficientVectorLoadStore) + ResultTy = Ctx.Types.inferScalarType(this); + } + + return (ScalarCost * VF.getFixedValue()) + + Ctx.getScalarizationOverhead(ResultTy, OpsToScalarize, VF, true); } } diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 58fab8f222d23..5252e1f928294 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -2853,6 +2853,7 @@ void VPlanTransforms::replaceSymbolicStrides( return R->getParent()->getParent() || R->getParent() == Plan.getVectorLoopRegion()->getSinglePredecessor(); }; + ValueToSCEVMapTy RewriteMap; for (const SCEV *Stride : StridesMap.values()) { using namespace SCEVPatternMatch; auto *StrideV = cast(Stride)->getValue(); @@ -2880,6 +2881,22 @@ void VPlanTransforms::replaceSymbolicStrides( VPValue *CI = Plan.getOrAddLiveIn(ConstantInt::get(U->getType(), C)); StrideVPV->replaceUsesWithIf(CI, CanUseVersionedStride); } + RewriteMap[StrideV] = PSE.getSCEV(StrideV); + } + + for (VPRecipeBase &R : *Plan.getEntry()) { + auto *ExpSCEV = dyn_cast(&R); + if (!ExpSCEV) + continue; + const SCEV *ScevExpr = ExpSCEV->getSCEV(); + auto *NewSCEV = + SCEVParameterRewriter::rewrite(ScevExpr, *PSE.getSE(), RewriteMap); + if (NewSCEV != ScevExpr) { + VPValue *NewExp = vputils::getOrCreateVPValueForSCEVExpr(Plan, NewSCEV); + 
ExpSCEV->replaceAllUsesWith(NewExp); + if (Plan.getTripCount() == ExpSCEV) + Plan.resetTripCount(NewExp); + } } } diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp index eac0e705a877d..059993043dcda 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp @@ -13,6 +13,7 @@ #include "llvm/Analysis/ScalarEvolutionExpressions.h" using namespace llvm; +using namespace llvm::VPlanPatternMatch; bool vputils::onlyFirstLaneUsed(const VPValue *Def) { return all_of(Def->users(), @@ -63,7 +64,6 @@ bool vputils::isHeaderMask(const VPValue *V, VPlan &Plan) { }; VPValue *A, *B; - using namespace VPlanPatternMatch; if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B), m_One()))) return B == Plan.getTripCount() && @@ -90,7 +90,6 @@ const SCEV *vputils::getSCEVExprForVPValue(VPValue *V, ScalarEvolution &SE) { } bool vputils::isUniformAcrossVFsAndUFs(VPValue *V) { - using namespace VPlanPatternMatch; // Live-ins are uniform. if (V->isLiveIn()) return true; @@ -159,7 +158,6 @@ std::optional vputils::getRecipesForUncountableExit(VPlan &Plan, SmallVectorImpl &Recipes, SmallVectorImpl &GEPs) { - using namespace llvm::VPlanPatternMatch; // Given a VPlan like the following (just including the recipes contributing // to loop control exiting here, not the actual work), we're looking to match // the recipes contributing to the uncountable exit condition comparison diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index 92caa0b4e51d5..013ea2e883534 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -199,7 +199,8 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const { // EVLIVIncrement is only used by EVLIV & BranchOnCount. // Having more than two users is unexpected. 
using namespace llvm::VPlanPatternMatch; - if ((I->getNumUsers() != 1) && + if (I->getOpcode() != VPInstruction::Broadcast && + I->getNumUsers() != 1 && (I->getNumUsers() != 2 || none_of(I->users(), match_fn(m_BranchOnCount(m_Specific(I), m_VPValue()))))) { diff --git a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll index c367b31f6d445..023a8c056968f 100644 --- a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll +++ b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll @@ -187,45 +187,6 @@ exit: ret void } -; In the following test, dependence distance is possibly zero, -; but this is not equivalent to the condition known-non-positive -; and known-non-negative. - -define void @possibly_zero_dist_diff_typesz(ptr %p) { -; CHECK-LABEL: 'possibly_zero_dist_diff_typesz' -; CHECK-NEXT: loop: -; CHECK-NEXT: Memory dependences are safe -; CHECK-NEXT: Dependences: -; CHECK-NEXT: Forward: -; CHECK-NEXT: %ld.p = load i32, ptr %gep.p.iv.i32, align 1 -> -; CHECK-NEXT: store i16 %trunc, ptr %gep.p.iv.i16, align 1 -; CHECK-EMPTY: -; CHECK-NEXT: Run-time memory checks: -; CHECK-NEXT: Grouped accesses: -; CHECK-EMPTY: -; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop. -; CHECK-NEXT: SCEV assumptions: -; CHECK-EMPTY: -; CHECK-NEXT: Expressions re-written: -; -entry: - br label %loop - -loop: - %iv = phi i16 [ 0, %entry ], [ %iv.next, %loop ] - %gep.p.iv.i32 = getelementptr inbounds nuw i32, ptr %p, i16 %iv - %ld.p = load i32, ptr %gep.p.iv.i32, align 1 - %trunc = trunc i32 %ld.p to i16 - %gep.p.iv.i16 = getelementptr inbounds nuw i16, ptr %p, i16 %iv - store i16 %trunc, ptr %gep.p.iv.i16, align 1 - %iv.next = add nuw nsw i16 %iv, 1 - %exit.cond = icmp eq i16 %iv.next, 32 - br i1 %exit.cond, label %exit, label %loop - -exit: - ret void -} - ; In the following test, the sink is loop-invariant. 
define void @type_size_equivalence_sink_loopinv(ptr nocapture %vec, i64 %n) { diff --git a/llvm/test/Analysis/ScalarEvolution/trip-multiple-guard-info.ll b/llvm/test/Analysis/ScalarEvolution/trip-multiple-guard-info.ll index bf140c7fa216a..b1fe7b1b2b7ee 100644 --- a/llvm/test/Analysis/ScalarEvolution/trip-multiple-guard-info.ll +++ b/llvm/test/Analysis/ScalarEvolution/trip-multiple-guard-info.ll @@ -574,5 +574,164 @@ exit: ret void } +define void @test_ptr_aligned_by_2_and_4_via_assumption(ptr %start, ptr %end) { +; CHECK-LABEL: 'test_ptr_aligned_by_2_and_4_via_assumption' +; CHECK-NEXT: Classifying expressions for: @test_ptr_aligned_by_2_and_4_via_assumption +; CHECK-NEXT: %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {%start,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.next = getelementptr i8, ptr %iv, i64 4 +; CHECK-NEXT: --> {(4 + %start),+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @test_ptr_aligned_by_2_and_4_via_assumption +; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. 
+; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated constant max backedge-taken count is i64 4611686018427387903 +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %start, i64 2) ] + call void @llvm.assume(i1 true) [ "align"(ptr %end, i64 4) ] + br label %loop + +loop: + %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] + store ptr %iv, ptr %iv + %iv.next = getelementptr i8, ptr %iv, i64 4 + %ec = icmp ne ptr %iv.next, %end + br i1 %ec, label %loop, label %exit + +exit: + ret void +} + +define void @test_ptrs_aligned_by_4_via_assumption(ptr %start, ptr %end) { +; CHECK-LABEL: 'test_ptrs_aligned_by_4_via_assumption' +; CHECK-NEXT: Classifying expressions for: @test_ptrs_aligned_by_4_via_assumption +; CHECK-NEXT: %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {%start,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.next = getelementptr i8, ptr %iv, i64 4 +; CHECK-NEXT: --> {(4 + %start),+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: 
@test_ptrs_aligned_by_4_via_assumption +; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. +; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated constant max backedge-taken count is i64 4611686018427387903 +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %start, i64 4) ] + call void @llvm.assume(i1 true) [ "align"(ptr %end, i64 4) ] + br label %loop + +loop: + %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] + store ptr %iv, ptr %iv + %iv.next = getelementptr i8, ptr %iv, i64 4 + %ec = icmp ne ptr %iv.next, %end + br i1 %ec, label %loop, label %exit + +exit: + ret void +} + +define void @test_ptrs_aligned_by_8_via_assumption(ptr %start, ptr %end) { +; CHECK-LABEL: 'test_ptrs_aligned_by_8_via_assumption' +; CHECK-NEXT: Classifying expressions for: @test_ptrs_aligned_by_8_via_assumption +; CHECK-NEXT: %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {%start,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; 
CHECK-NEXT: %iv.next = getelementptr i8, ptr %iv, i64 4 +; CHECK-NEXT: --> {(4 + %start),+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @test_ptrs_aligned_by_8_via_assumption +; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. +; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated constant max backedge-taken count is i64 4611686018427387903 +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %start, i64 8) ] + call void @llvm.assume(i1 true) [ "align"(ptr %end, i64 8) ] + br label %loop + +loop: + %iv = phi ptr [ %start, %entry ], [ %iv.next, %loop ] + store ptr %iv, ptr %iv + %iv.next = getelementptr i8, ptr %iv, i64 4 + %ec = icmp ne ptr %iv.next, %end + br i1 %ec, label %loop, label %exit + +exit: + ret void +} + +declare i1 @cond() + +define void @test_ptr_aligned_by_4_via_assumption_multiple_loop_predecessors(ptr %start, ptr %end) { +; CHECK-LABEL: 
'test_ptr_aligned_by_4_via_assumption_multiple_loop_predecessors' +; CHECK-NEXT: Classifying expressions for: @test_ptr_aligned_by_4_via_assumption_multiple_loop_predecessors +; CHECK-NEXT: %c = call i1 @cond() +; CHECK-NEXT: --> %c U: full-set S: full-set +; CHECK-NEXT: %iv = phi ptr [ %start, %then ], [ %start, %else ], [ %iv.next, %loop ] +; CHECK-NEXT: --> {%start,+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %iv.next = getelementptr i8, ptr %iv, i64 4 +; CHECK-NEXT: --> {(4 + %start),+,4}<%loop> U: full-set S: full-set Exits: <> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @test_ptr_aligned_by_4_via_assumption_multiple_loop_predecessors +; CHECK-NEXT: Loop %loop: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. +; CHECK-NEXT: Loop %loop: Predicated backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated constant max backedge-taken count is i64 4611686018427387903 +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; CHECK-NEXT: Loop %loop: Predicated symbolic max backedge-taken count is ((-4 + (-1 * (ptrtoint ptr %start to i64)) + (ptrtoint ptr %end to i64)) /u 4) +; CHECK-NEXT: Predicates: +; CHECK-NEXT: Equal predicate: (zext i2 ((trunc i64 (ptrtoint ptr %end to i64) to i2) + (-1 * (trunc i64 (ptrtoint ptr %start to i64) to i2))) to i64) == 0 +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %start, i64 2) ] + call 
void @llvm.assume(i1 true) [ "align"(ptr %end, i64 4) ] + %c = call i1 @cond() + br i1 %c, label %then, label %else + +then: + br label %loop + +else: + br label %loop + +loop: + %iv = phi ptr [ %start, %then] , [ %start, %else ], [ %iv.next, %loop ] + store ptr %iv, ptr %iv + %iv.next = getelementptr i8, ptr %iv, i64 4 + %ec = icmp ne ptr %iv.next, %end + br i1 %ec, label %loop, label %exit + +exit: + ret void +} + declare void @llvm.assume(i1) declare void @llvm.experimental.guard(i1, ...) diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir index 9ad28a5487233..ba867f4ae0c26 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir @@ -540,8 +540,9 @@ # DEBUG-NEXT: .. the first uncovered type index: 1, OK # DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_FLDEXP (opcode {{[0-9]+}}): 2 type indices, 0 imm indices -# DEBUG-NEXT:.. type index coverage check SKIPPED: no rules defined -# DEBUG-NEXT:.. imm index coverage check SKIPPED: no rules defined +# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}} +# DEBUG-NEXT: .. the first uncovered type index: 2, OK +# DEBUG-NEXT: .. the first uncovered imm index: 0, OK # DEBUG-NEXT: G_FFREXP (opcode {{[0-9]+}}): 2 type indices, 0 imm indices # DEBUG-NEXT:.. type index coverage check SKIPPED: no rules defined # DEBUG-NEXT:.. 
imm index coverage check SKIPPED: no rules defined diff --git a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll index 1c216e7357215..e371748a43b29 100644 --- a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll +++ b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll @@ -11,6 +11,16 @@ entry: ret <4 x i16> %1 } +define <4 x half> @v4bf16_to_v4f16(float, <4 x bfloat> %a) nounwind { +; CHECK-LABEL: v4bf16_to_v4f16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret +entry: + %1 = bitcast <4 x bfloat> %a to <4 x half> + ret <4 x half> %1 +} + define <2 x i32> @v4bf16_to_v2i32(float, <4 x bfloat> %a) nounwind { ; CHECK-LABEL: v4bf16_to_v2i32: ; CHECK: // %bb.0: // %entry @@ -82,6 +92,16 @@ entry: ret <4 x bfloat> %1 } +define <4 x bfloat> @v4f16_to_v4bf16(float, <4 x half> %a) nounwind { +; CHECK-LABEL: v4f16_to_v4bf16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: fmov d0, d1 +; CHECK-NEXT: ret +entry: + %1 = bitcast <4 x half> %a to <4 x bfloat> + ret <4 x bfloat> %1 +} + define <4 x bfloat> @v2i32_to_v4bf16(float, <2 x i32> %a) nounwind { ; CHECK-LABEL: v2i32_to_v4bf16: ; CHECK: // %bb.0: // %entry @@ -152,6 +172,16 @@ entry: ret <8 x i16> %1 } +define <8 x half> @v8bf16_to_v8f16(float, <8 x bfloat> %a) nounwind { +; CHECK-LABEL: v8bf16_to_v8f16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov v0.16b, v1.16b +; CHECK-NEXT: ret +entry: + %1 = bitcast <8 x bfloat> %a to <8 x half> + ret <8 x half> %1 +} + define <4 x i32> @v8bf16_to_v4i32(float, <8 x bfloat> %a) nounwind { ; CHECK-LABEL: v8bf16_to_v4i32: ; CHECK: // %bb.0: // %entry @@ -202,6 +232,16 @@ entry: ret <8 x bfloat> %1 } +define <8 x bfloat> @v8f16_to_v8bf16(float, <8 x half> %a) nounwind { +; CHECK-LABEL: v8f16_to_v8bf16: +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: mov v0.16b, v1.16b +; CHECK-NEXT: ret +entry: + %1 = bitcast <8 x half> %a to <8 x bfloat> + ret <8 x bfloat> %1 +} + define <8 x bfloat> @v4i32_to_v8bf16(float, <4 x i32> 
%a) nounwind { ; CHECK-LABEL: v4i32_to_v8bf16: ; CHECK: // %bb.0: // %entry diff --git a/llvm/test/CodeGen/AArch64/ldexp.ll b/llvm/test/CodeGen/AArch64/ldexp.ll index 6019fa1490e3d..1e35bd627a199 100644 --- a/llvm/test/CodeGen/AArch64/ldexp.ll +++ b/llvm/test/CodeGen/AArch64/ldexp.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 ; RUN: llc -mtriple=aarch64 -mattr=+sve < %s -o - | FileCheck -check-prefixes=SVE,SVELINUX %s +; RUN: llc -mtriple=aarch64 -global-isel < %s -o - | FileCheck -check-prefixes=GISEL %s ; RUN: llc -mtriple=aarch64-windows-msvc -mattr=+sve < %s -o - | FileCheck -check-prefixes=SVE,SVEWINDOWS %s ; RUN: llc -mtriple=aarch64-windows-msvc < %s -o - | FileCheck -check-prefixes=WINDOWS %s @@ -15,6 +16,10 @@ define double @testExp(double %val, i32 %a) { ; SVE-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE-NEXT: ret ; +; GISEL-LABEL: testExp: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: b ldexp +; ; WINDOWS-LABEL: testExp: ; WINDOWS: // %bb.0: // %entry ; WINDOWS-NEXT: b ldexp @@ -37,6 +42,10 @@ define double @testExpIntrinsic(double %val, i32 %a) { ; SVE-NEXT: // kill: def $d0 killed $d0 killed $z0 ; SVE-NEXT: ret ; +; GISEL-LABEL: testExpIntrinsic: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: b ldexp +; ; WINDOWS-LABEL: testExpIntrinsic: ; WINDOWS: // %bb.0: // %entry ; WINDOWS-NEXT: b ldexp @@ -55,6 +64,10 @@ define float @testExpf(float %val, i32 %a) { ; SVELINUX-NEXT: // kill: def $s0 killed $s0 killed $z0 ; SVELINUX-NEXT: ret ; +; GISEL-LABEL: testExpf: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: b ldexpf +; ; SVEWINDOWS-LABEL: testExpf: ; SVEWINDOWS: // %bb.0: // %entry ; SVEWINDOWS-NEXT: b ldexpf @@ -77,6 +90,10 @@ define float @testExpfIntrinsic(float %val, i32 %a) { ; SVE-NEXT: // kill: def $s0 killed $s0 killed $z0 ; SVE-NEXT: ret ; +; GISEL-LABEL: testExpfIntrinsic: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: b ldexpf +; ; WINDOWS-LABEL: testExpfIntrinsic: ; 
WINDOWS: .seh_proc testExpfIntrinsic ; WINDOWS-NEXT: // %bb.0: // %entry @@ -98,6 +115,90 @@ entry: ret float %call } +define <2 x float> @test_ldexp_v2f32_v2i32(<2 x float> %Val, <2 x i32> %Exp) { +; SVE-LABEL: test_ldexp_v2f32_v2i32: +; SVE: // %bb.0: +; SVE-NEXT: // kill: def $d1 killed $d1 def $z1 +; SVE-NEXT: mov w8, v1.s[1] +; SVE-NEXT: // kill: def $d0 killed $d0 def $z0 +; SVE-NEXT: mov s2, v0.s[1] +; SVE-NEXT: ptrue p0.s +; SVE-NEXT: fscale z0.s, p0/m, z0.s, z1.s +; SVE-NEXT: fmov s3, w8 +; SVE-NEXT: fscale z2.s, p0/m, z2.s, z3.s +; SVE-NEXT: mov v0.s[1], v2.s[0] +; SVE-NEXT: // kill: def $d0 killed $d0 killed $z0 +; SVE-NEXT: ret +; +; GISEL-LABEL: test_ldexp_v2f32_v2i32: +; GISEL: // %bb.0: +; GISEL-NEXT: sub sp, sp, #48 +; GISEL-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill +; GISEL-NEXT: str x30, [sp, #32] // 8-byte Folded Spill +; GISEL-NEXT: .cfi_def_cfa_offset 48 +; GISEL-NEXT: .cfi_offset w30, -16 +; GISEL-NEXT: .cfi_offset b8, -24 +; GISEL-NEXT: .cfi_offset b9, -32 +; GISEL-NEXT: // kill: def $d1 killed $d1 def $q1 +; GISEL-NEXT: fmov w0, s1 +; GISEL-NEXT: // kill: def $d0 killed $d0 def $q0 +; GISEL-NEXT: mov s8, v0.s[1] +; GISEL-NEXT: mov s9, v1.s[1] +; GISEL-NEXT: // kill: def $s0 killed $s0 killed $q0 +; GISEL-NEXT: bl ldexpf +; GISEL-NEXT: // kill: def $s0 killed $s0 def $q0 +; GISEL-NEXT: str q0, [sp] // 16-byte Folded Spill +; GISEL-NEXT: fmov w0, s9 +; GISEL-NEXT: fmov s0, s8 +; GISEL-NEXT: bl ldexpf +; GISEL-NEXT: ldr q1, [sp] // 16-byte Folded Reload +; GISEL-NEXT: // kill: def $s0 killed $s0 def $q0 +; GISEL-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload +; GISEL-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload +; GISEL-NEXT: mov v1.s[1], v0.s[0] +; GISEL-NEXT: fmov d0, d1 +; GISEL-NEXT: add sp, sp, #48 +; GISEL-NEXT: ret +; +; WINDOWS-LABEL: test_ldexp_v2f32_v2i32: +; WINDOWS: .seh_proc test_ldexp_v2f32_v2i32 +; WINDOWS-NEXT: // %bb.0: +; WINDOWS-NEXT: sub sp, sp, #48 +; WINDOWS-NEXT: .seh_stackalloc 48 +; WINDOWS-NEXT: str 
x30, [sp, #32] // 8-byte Folded Spill +; WINDOWS-NEXT: .seh_save_reg x30, 32 +; WINDOWS-NEXT: .seh_endprologue +; WINDOWS-NEXT: // kill: def $d0 killed $d0 def $q0 +; WINDOWS-NEXT: mov s2, v0.s[1] +; WINDOWS-NEXT: // kill: def $d1 killed $d1 def $q1 +; WINDOWS-NEXT: stp q1, q0, [sp] // 32-byte Folded Spill +; WINDOWS-NEXT: mov w0, v1.s[1] +; WINDOWS-NEXT: fcvt d0, s2 +; WINDOWS-NEXT: bl ldexp +; WINDOWS-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; WINDOWS-NEXT: fcvt s0, d0 +; WINDOWS-NEXT: fcvt d1, s1 +; WINDOWS-NEXT: str q0, [sp, #16] // 16-byte Folded Spill +; WINDOWS-NEXT: ldr q0, [sp] // 16-byte Folded Reload +; WINDOWS-NEXT: fmov w0, s0 +; WINDOWS-NEXT: fmov d0, d1 +; WINDOWS-NEXT: bl ldexp +; WINDOWS-NEXT: fcvt s0, d0 +; WINDOWS-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload +; WINDOWS-NEXT: mov v0.s[1], v1.s[0] +; WINDOWS-NEXT: // kill: def $d0 killed $d0 killed $q0 +; WINDOWS-NEXT: .seh_startepilogue +; WINDOWS-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload +; WINDOWS-NEXT: .seh_save_reg x30, 32 +; WINDOWS-NEXT: add sp, sp, #48 +; WINDOWS-NEXT: .seh_stackalloc 48 +; WINDOWS-NEXT: .seh_endepilogue +; WINDOWS-NEXT: ret +; WINDOWS-NEXT: .seh_endfunclet +; WINDOWS-NEXT: .seh_endproc + %result = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %Val, <2 x i32> %Exp) + ret <2 x float> %result +} declare float @ldexpf(float, i32) memory(none) @@ -106,6 +207,10 @@ define fp128 @testExpl(fp128 %val, i32 %a) { ; SVE: // %bb.0: // %entry ; SVE-NEXT: b ldexpl ; +; GISEL-LABEL: testExpl: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: b ldexpl +; ; WINDOWS-LABEL: testExpl: ; WINDOWS: // %bb.0: // %entry ; WINDOWS-NEXT: b ldexpl @@ -126,6 +231,17 @@ define half @testExpf16(half %val, i32 %a) { ; SVE-NEXT: fcvt h0, s0 ; SVE-NEXT: ret ; +; GISEL-LABEL: testExpf16: +; GISEL: // %bb.0: // %entry +; GISEL-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; GISEL-NEXT: .cfi_def_cfa_offset 16 +; GISEL-NEXT: .cfi_offset w30, -16 +; GISEL-NEXT: fcvt s0, h0 +; GISEL-NEXT: bl ldexpf +; GISEL-NEXT: fcvt h0, s0 +; GISEL-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; GISEL-NEXT: ret +; ; WINDOWS-LABEL: testExpf16: ; WINDOWS: .seh_proc testExpf16 ; WINDOWS-NEXT: // %bb.0: // %entry diff --git a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll index fc43c714d69b3..b6dee97ea2962 100644 --- a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll +++ b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-SDAG ; A simple EH test case that corresponds to the following C++ source: ; @@ -87,6 +88,90 @@ define void @za_with_raii(i1 %fail) "aarch64_inout_za" personality ptr @__gxx_pe ; CHECK-NEXT: mov x0, x19 ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl _Unwind_Resume +; +; CHECK-SDAG-LABEL: za_with_raii: +; CHECK-SDAG: .Lfunc_begin0: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception0 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! 
// 16-byte Folded Spill +; CHECK-SDAG-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -8 +; CHECK-SDAG-NEXT: .cfi_offset w20, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: tbnz w0, #0, .LBB0_2 +; CHECK-SDAG-NEXT: // %bb.1: // %return_normally +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: b shared_za_call +; CHECK-SDAG-NEXT: .LBB0_2: // %throw_exception +; CHECK-SDAG-NEXT: sub x20, x29, #16 +; CHECK-SDAG-NEXT: mov w0, #8 // =0x8 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: bl __cxa_allocate_exception +; CHECK-SDAG-NEXT: mov x8, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x9, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x9, .LBB0_4 +; CHECK-SDAG-NEXT: // %bb.3: // %throw_exception +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_4: // %throw_exception +; CHECK-SDAG-NEXT: adrp x9, .L.str +; CHECK-SDAG-NEXT: add x9, x9, :lo12:.L.str +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: str x9, [x8] +; CHECK-SDAG-NEXT: .Ltmp0: // EH_LABEL +; CHECK-SDAG-NEXT: adrp x1, :got:typeinfo_for_char_const_ptr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: mov x0, x8 +; CHECK-SDAG-NEXT: ldr x1, [x1, :got_lo12:typeinfo_for_char_const_ptr] +; CHECK-SDAG-NEXT: mov x2, xzr +; CHECK-SDAG-NEXT: bl __cxa_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_6 +; 
CHECK-SDAG-NEXT: // %bb.5: // %throw_exception +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_6: // %throw_exception +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .Ltmp1: // EH_LABEL +; CHECK-SDAG-NEXT: // %bb.7: // %throw_fail +; CHECK-SDAG-NEXT: .LBB0_8: // %unwind_dtors +; CHECK-SDAG-NEXT: .Ltmp2: // EH_LABEL +; CHECK-SDAG-NEXT: mov x19, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_10 +; CHECK-SDAG-NEXT: // %bb.9: // %unwind_dtors +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_10: // %unwind_dtors +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20 +; CHECK-SDAG-NEXT: bl _Unwind_Resume +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB0_12 +; CHECK-SDAG-NEXT: // %bb.11: // %unwind_dtors +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB0_12: // %unwind_dtors +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr br i1 %fail, label %throw_exception, label %return_normally throw_exception: @@ -124,7 +209,7 @@ throw_fail: ; } ; shared_za_call(); ; } -define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 { +define void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 { ; CHECK-LABEL: try_catch: ; CHECK: .Lfunc_begin1: ; CHECK-NEXT: .cfi_startproc @@ -142,11 +227,11 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: msub x9, x8, x8, x9 ; CHECK-NEXT: mov sp, x9 ; CHECK-NEXT: stp x9, x8, [x29, #-16] -; CHECK-NEXT: .Ltmp3: +; CHECK-NEXT: .Ltmp3: // EH_LABEL ; CHECK-NEXT: sub x8, x29, #16 ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl may_throw -; CHECK-NEXT: .Ltmp4: +; CHECK-NEXT: .Ltmp4: // EH_LABEL ; CHECK-NEXT: 
.LBB1_1: // %after_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -160,7 +245,7 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload ; CHECK-NEXT: b shared_za_call ; CHECK-NEXT: .LBB1_4: // %catch -; CHECK-NEXT: .Ltmp5: +; CHECK-NEXT: .Ltmp5: // EH_LABEL ; CHECK-NEXT: bl __cxa_begin_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -175,6 +260,78 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per ; CHECK-NEXT: msr TPIDR2_EL0, x8 ; CHECK-NEXT: bl __cxa_end_catch ; CHECK-NEXT: b .LBB1_1 +; +; CHECK-SDAG-LABEL: try_catch: +; CHECK-SDAG: .Lfunc_begin1: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception1 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: .Ltmp3: // EH_LABEL +; CHECK-SDAG-NEXT: sub x19, x29, #16 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_2 +; CHECK-SDAG-NEXT: // %bb.1: +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_2: +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .Ltmp4: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB1_3: // %after_catch +; CHECK-SDAG-NEXT: mov sp, x29 
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: b shared_za_call +; CHECK-SDAG-NEXT: .LBB1_4: // %catch +; CHECK-SDAG-NEXT: .Ltmp5: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_6 +; CHECK-SDAG-NEXT: // %bb.5: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_6: // %catch +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_8 +; CHECK-SDAG-NEXT: // %bb.7: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_8: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB1_10 +; CHECK-SDAG-NEXT: // %bb.9: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB1_10: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: b .LBB1_3 invoke void @may_throw() to label %after_catch unwind label %catch @@ -235,16 +392,16 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx ; CHECK-NEXT: zero {za} ; CHECK-NEXT: .LBB2_2: ; CHECK-NEXT: smstart za -; CHECK-NEXT: .Ltmp6: +; CHECK-NEXT: .Ltmp6: // EH_LABEL ; CHECK-NEXT: bl shared_za_call -; CHECK-NEXT: .Ltmp7: +; CHECK-NEXT: .Ltmp7: // EH_LABEL ; CHECK-NEXT: .LBB2_3: // %exit ; CHECK-NEXT: smstop za ; CHECK-NEXT: mov sp, x29 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload ; 
CHECK-NEXT: ret ; CHECK-NEXT: .LBB2_4: // %catch -; CHECK-NEXT: .Ltmp8: +; CHECK-NEXT: .Ltmp8: // EH_LABEL ; CHECK-NEXT: bl __cxa_begin_catch ; CHECK-NEXT: smstart za ; CHECK-NEXT: mrs x8, TPIDR2_EL0 @@ -260,6 +417,78 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx ; CHECK-NEXT: bl __cxa_end_catch ; CHECK-NEXT: msr TPIDR2_EL0, xzr ; CHECK-NEXT: b .LBB2_3 +; +; CHECK-SDAG-LABEL: try_catch_shared_za_callee: +; CHECK-SDAG: .Lfunc_begin2: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception2 +; CHECK-SDAG-NEXT: // %bb.0: // %prelude +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: sub sp, sp, #16 +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: rdsvl x8, #1 +; CHECK-SDAG-NEXT: mov x9, sp +; CHECK-SDAG-NEXT: msub x9, x8, x8, x9 +; CHECK-SDAG-NEXT: mov sp, x9 +; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16] +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: cbz x8, .LBB2_2 +; CHECK-SDAG-NEXT: // %bb.1: // %save.za +; CHECK-SDAG-NEXT: bl __arm_tpidr2_save +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: .LBB2_2: +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: zero {za} +; CHECK-SDAG-NEXT: .Ltmp6: // EH_LABEL +; CHECK-SDAG-NEXT: bl shared_za_call +; CHECK-SDAG-NEXT: .Ltmp7: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB2_3: // %exit +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB2_4: // %catch +; CHECK-SDAG-NEXT: .Ltmp8: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; 
CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: sub x19, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_6 +; CHECK-SDAG-NEXT: // %bb.5: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_6: // %catch +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_8 +; CHECK-SDAG-NEXT: // %bb.7: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_8: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: bl noexcept_shared_za_call +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19 +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-SDAG-NEXT: sub x0, x29, #16 +; CHECK-SDAG-NEXT: cbnz x8, .LBB2_10 +; CHECK-SDAG-NEXT: // %bb.9: // %catch +; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore +; CHECK-SDAG-NEXT: .LBB2_10: // %catch +; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr +; CHECK-SDAG-NEXT: b .LBB2_3 invoke void @shared_za_call() #4 to label %exit unwind label %catch catch: @@ -275,6 +504,234 @@ exit: ret void } +; A simple ZT0 exception example that corresponds to: +; +; struct ZT0Resource { +; ~ZT0Resource() __arm_inout("zt0") { +; shared_zt0_call(); // simulate cleanup in destructor +; } +; }; +; +; void za_with_raii() __arm_inout("zt0") { +; ZT0Resource r; +; may_throw(); +; } +; +; This code may require reloading ZT0 in the cleanup for ~ZT0Resource(). +; +; FIXME: Codegen with `-aarch64-new-sme-abi` is broken with ZT0 (as it is not implemented). 
+define void @try_catch_shared_zt0_callee() "aarch64_inout_zt0" personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: try_catch_shared_zt0_callee: +; CHECK: .Lfunc_begin3: +; CHECK-NEXT: .cfi_startproc +; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-NEXT: .cfi_lsda 28, .Lexception3 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: sub sp, sp, #80 +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w19, -8 +; CHECK-NEXT: .cfi_offset w20, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: rdsvl x8, #1 +; CHECK-NEXT: mov x9, sp +; CHECK-NEXT: msub x9, x8, x8, x9 +; CHECK-NEXT: mov sp, x9 +; CHECK-NEXT: stp x9, x8, [x29, #-80] +; CHECK-NEXT: .Ltmp9: // EH_LABEL +; CHECK-NEXT: sub x19, x29, #64 +; CHECK-NEXT: str zt0, [x19] +; CHECK-NEXT: smstop za +; CHECK-NEXT: bl may_throw +; CHECK-NEXT: smstart za +; CHECK-NEXT: ldr zt0, [x19] +; CHECK-NEXT: .Ltmp10: // EH_LABEL +; CHECK-NEXT: // %bb.1: // %return_normally +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB3_2: // %unwind_dtors +; CHECK-NEXT: .Ltmp11: // EH_LABEL +; CHECK-NEXT: sub x20, x29, #64 +; CHECK-NEXT: mov x19, x0 +; CHECK-NEXT: smstart za +; CHECK-NEXT: mrs x8, TPIDR2_EL0 +; CHECK-NEXT: sub x0, x29, #80 +; CHECK-NEXT: cbnz x8, .LBB3_4 +; CHECK-NEXT: // %bb.3: // %unwind_dtors +; CHECK-NEXT: bl __arm_tpidr2_restore +; CHECK-NEXT: .LBB3_4: // %unwind_dtors +; CHECK-NEXT: msr TPIDR2_EL0, xzr +; CHECK-NEXT: bl shared_zt0_call +; CHECK-NEXT: str zt0, [x20] +; CHECK-NEXT: smstop za +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl _Unwind_Resume +; CHECK-NEXT: smstart za +; CHECK-NEXT: ldr zt0, [x20] +; +; CHECK-SDAG-LABEL: try_catch_shared_zt0_callee: +; 
CHECK-SDAG: .Lfunc_begin3: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception3 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: sub sp, sp, #96 +; CHECK-SDAG-NEXT: str x30, [sp, #64] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill +; CHECK-SDAG-NEXT: .cfi_def_cfa_offset 96 +; CHECK-SDAG-NEXT: .cfi_offset w19, -8 +; CHECK-SDAG-NEXT: .cfi_offset w20, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -32 +; CHECK-SDAG-NEXT: .Ltmp9: // EH_LABEL +; CHECK-SDAG-NEXT: mov x19, sp +; CHECK-SDAG-NEXT: str zt0, [x19] +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x19] +; CHECK-SDAG-NEXT: .Ltmp10: // EH_LABEL +; CHECK-SDAG-NEXT: // %bb.1: // %return_normally +; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: add sp, sp, #96 +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB3_2: // %unwind_dtors +; CHECK-SDAG-NEXT: .Ltmp11: // EH_LABEL +; CHECK-SDAG-NEXT: mov x20, sp +; CHECK-SDAG-NEXT: mov x19, x0 +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x20] +; CHECK-SDAG-NEXT: bl shared_zt0_call +; CHECK-SDAG-NEXT: str zt0, [x20] +; CHECK-SDAG-NEXT: smstop za +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl _Unwind_Resume +; CHECK-SDAG-NEXT: smstart za +; CHECK-SDAG-NEXT: ldr zt0, [x20] + invoke void @may_throw() + to label %return_normally unwind label %unwind_dtors + +unwind_dtors: + %5 = landingpad { ptr, i32 } + cleanup + tail call void @shared_zt0_call() + resume { ptr, i32 } %5 + +return_normally: + ret void +} + +; This example corresponds to: +; +; __arm_agnostic("sme_za_state") void try_catch_agnostic_za() +; { +; try { +; may_throw(); +; } catch(...) 
{ +; } +; } +; +; In this example we must execute __arm_sme_restore once we enter the catch block +; (before executing __arm_sme_save again, which would invalidate the prior save). +define void @try_catch_agnostic_za() "aarch64_za_state_agnostic" personality ptr @__gxx_personality_v0 { +; CHECK-LABEL: try_catch_agnostic_za: +; CHECK: .Lfunc_begin4: +; CHECK-NEXT: .cfi_startproc +; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-NEXT: .cfi_lsda 28, .Lexception4 +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill +; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: .cfi_def_cfa w29, 32 +; CHECK-NEXT: .cfi_offset w19, -16 +; CHECK-NEXT: .cfi_offset w30, -24 +; CHECK-NEXT: .cfi_offset w29, -32 +; CHECK-NEXT: bl __arm_sme_state_size +; CHECK-NEXT: sub sp, sp, x0 +; CHECK-NEXT: mov x19, sp +; CHECK-NEXT: .Ltmp12: // EH_LABEL +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl __arm_sme_save +; CHECK-NEXT: bl may_throw +; CHECK-NEXT: .Ltmp13: // EH_LABEL +; CHECK-NEXT: .LBB4_1: // %exit +; CHECK-NEXT: mov x0, x19 +; CHECK-NEXT: bl __arm_sme_restore +; CHECK-NEXT: mov sp, x29 +; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-NEXT: ret +; CHECK-NEXT: .LBB4_2: // %catch +; CHECK-NEXT: .Ltmp14: // EH_LABEL +; CHECK-NEXT: bl __cxa_begin_catch +; CHECK-NEXT: bl __cxa_end_catch +; CHECK-NEXT: b .LBB4_1 +; +; CHECK-SDAG-LABEL: try_catch_agnostic_za: +; CHECK-SDAG: .Lfunc_begin4: +; CHECK-SDAG-NEXT: .cfi_startproc +; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0 +; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception4 +; CHECK-SDAG-NEXT: // %bb.0: +; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! 
// 16-byte Folded Spill +; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill +; CHECK-SDAG-NEXT: mov x29, sp +; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32 +; CHECK-SDAG-NEXT: .cfi_offset w19, -16 +; CHECK-SDAG-NEXT: .cfi_offset w30, -24 +; CHECK-SDAG-NEXT: .cfi_offset w29, -32 +; CHECK-SDAG-NEXT: bl __arm_sme_state_size +; CHECK-SDAG-NEXT: sub sp, sp, x0 +; CHECK-SDAG-NEXT: mov x19, sp +; CHECK-SDAG-NEXT: .Ltmp12: // EH_LABEL +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: bl may_throw +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: .Ltmp13: // EH_LABEL +; CHECK-SDAG-NEXT: .LBB4_1: // %exit +; CHECK-SDAG-NEXT: mov sp, x29 +; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload +; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload +; CHECK-SDAG-NEXT: ret +; CHECK-SDAG-NEXT: .LBB4_2: // %catch +; CHECK-SDAG-NEXT: .Ltmp14: // EH_LABEL +; CHECK-SDAG-NEXT: mov x1, x0 +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: mov x0, x1 +; CHECK-SDAG-NEXT: bl __cxa_begin_catch +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_save +; CHECK-SDAG-NEXT: bl __cxa_end_catch +; CHECK-SDAG-NEXT: mov x0, x19 +; CHECK-SDAG-NEXT: bl __arm_sme_restore +; CHECK-SDAG-NEXT: b .LBB4_1 + invoke void @may_throw() + to label %exit unwind label %catch +catch: + %eh_info = landingpad { ptr, i32 } + catch ptr null + %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0 + tail call ptr @__cxa_begin_catch(ptr %exception_ptr) + tail call void @__cxa_end_catch() + br label %exit + +exit: + ret void +} + declare ptr @__cxa_allocate_exception(i64) declare void @__cxa_throw(ptr, ptr, ptr) declare ptr @__cxa_begin_catch(ptr) @@ -284,3 +741,4 @@ declare i32 @__gxx_personality_v0(...) 
declare void @may_throw() declare void @shared_za_call() "aarch64_inout_za" declare void @noexcept_shared_za_call() "aarch64_inout_za" +declare void @shared_zt0_call() "aarch64_inout_zt0" diff --git a/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll b/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll index e65f401bcf68a..7fcb29d367006 100644 --- a/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll +++ b/llvm/test/CodeGen/AMDGPU/alignbit-pat.ll @@ -1,11 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck -check-prefix=GCN %s -; GCN-LABEL: {{^}}alignbit_shr_pat: -; GCN-DAG: s_load_dword s[[SHR:[0-9]+]] -; GCN-DAG: load_dwordx2 v[[[LO:[0-9]+]]:[[HI:[0-9]+]]] -; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], s[[SHR]] - define amdgpu_kernel void @alignbit_shr_pat(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, i32 %arg2) { +; GCN-LABEL: alignbit_shr_pat: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_load_dword s8, s[4:5], 0xd +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: s_and_b32 s0, s8, 31 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], s0 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = load i64, ptr addrspace(1) %arg, align 8 %tmp3 = and i32 %arg2, 31 @@ -16,12 +29,24 @@ bb: ret void } -; GCN-LABEL: {{^}}alignbit_shr_pat_v: -; GCN-DAG: load_dword v[[SHR:[0-9]+]], -; GCN-DAG: load_dwordx2 v[[[LO:[0-9]+]]:[[HI:[0-9]+]]] -; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], v[[SHR]] - define amdgpu_kernel void @alignbit_shr_pat_v(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) 
nocapture %arg1) { +; GCN-LABEL: alignbit_shr_pat_v: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, 0 +; GCN-NEXT: v_lshlrev_b32_e32 v1, 3, v0 +; GCN-NEXT: v_mov_b32_e32 v2, 0 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b64 s[4:5], s[0:1] +; GCN-NEXT: buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64 +; GCN-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; GCN-NEXT: s_mov_b64 s[4:5], s[2:3] +; GCN-NEXT: buffer_load_dword v0, v[1:2], s[4:7], 0 addr64 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_alignbit_b32 v0, v4, v3, v0 +; GCN-NEXT: buffer_store_dword v0, v[1:2], s[4:7], 0 addr64 +; GCN-NEXT: s_endpgm bb: %tid = tail call i32 @llvm.amdgcn.workitem.id.x() %gep1 = getelementptr inbounds i64, ptr addrspace(1) %arg, i32 %tid @@ -36,12 +61,24 @@ bb: ret void } -; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and30: -; Negative test, wrong constant -; GCN: v_lshr_b64 -; GCN-NOT: v_alignbit_b32 - define amdgpu_kernel void @alignbit_shr_pat_wrong_and30(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, i32 %arg2) { +; GCN-LABEL: alignbit_shr_pat_wrong_and30: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_load_dword s8, s[4:5], 0xd +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: s_and_b32 s0, s8, 30 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], s0 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = load i64, ptr addrspace(1) %arg, align 8 %tmp3 = and i32 %arg2, 30 @@ -52,12 +89,23 @@ bb: ret void } -; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and63: -; Negative test, wrong constant -; GCN: v_lshr_b64 -; GCN-NOT: v_alignbit_b32 - define 
amdgpu_kernel void @alignbit_shr_pat_wrong_and63(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1, i32 %arg2) { +; GCN-LABEL: alignbit_shr_pat_wrong_and63: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_load_dword s8, s[4:5], 0xd +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], s8 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = load i64, ptr addrspace(1) %arg, align 8 %tmp3 = and i32 %arg2, 63 @@ -68,11 +116,22 @@ bb: ret void } -; GCN-LABEL: {{^}}alignbit_shr_pat_const30: -; GCN: load_dwordx2 v[[[LO:[0-9]+]]:[[HI:[0-9]+]]] -; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], 30 - define amdgpu_kernel void @alignbit_shr_pat_const30(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) { +; GCN-LABEL: alignbit_shr_pat_const30: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s4, s0 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_lshr_b64 v[0:1], v[0:1], 30 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = load i64, ptr addrspace(1) %arg, align 8 %tmp5 = lshr i64 %tmp, 30 @@ -81,12 +140,22 @@ bb: ret void } -; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_const33: -; Negative test, shift amount more than 31 -; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}} -; GCN-NOT: v_alignbit_b32 - define amdgpu_kernel void 
@alignbit_shr_pat_wrong_const33(ptr addrspace(1) nocapture readonly %arg, ptr addrspace(1) nocapture %arg1) { +; GCN-LABEL: alignbit_shr_pat_wrong_const33: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b32 s4, s2 +; GCN-NEXT: s_mov_b32 s5, s3 +; GCN-NEXT: s_mov_b32 s2, s6 +; GCN-NEXT: s_mov_b32 s3, s7 +; GCN-NEXT: buffer_load_dword v0, off, s[0:3], 0 offset:4 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_lshrrev_b32_e32 v0, 1, v0 +; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = load i64, ptr addrspace(1) %arg, align 8 %tmp5 = lshr i64 %tmp, 33 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll index 8424917b2d604..815b9f294be8f 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll @@ -7526,831 +7526,1167 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v32i32_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[4:5] -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v41, s30, 0 -; SI-NEXT: v_writelane_b32 v41, s31, 1 -; SI-NEXT: v_writelane_b32 v41, s34, 2 -; SI-NEXT: v_writelane_b32 v41, s35, 3 -; SI-NEXT: v_writelane_b32 v41, s36, 4 -; SI-NEXT: v_writelane_b32 v41, s37, 5 -; SI-NEXT: v_writelane_b32 v41, s38, 6 -; SI-NEXT: 
v_writelane_b32 v41, s39, 7 -; SI-NEXT: v_writelane_b32 v41, s48, 8 -; SI-NEXT: v_writelane_b32 v41, s49, 9 -; SI-NEXT: v_writelane_b32 v41, s50, 10 -; SI-NEXT: v_writelane_b32 v41, s51, 11 -; SI-NEXT: v_writelane_b32 v41, s52, 12 -; SI-NEXT: v_writelane_b32 v41, s53, 13 -; SI-NEXT: v_writelane_b32 v41, s54, 14 -; SI-NEXT: v_writelane_b32 v41, s55, 15 -; SI-NEXT: v_writelane_b32 v41, s64, 16 -; SI-NEXT: v_writelane_b32 v41, s65, 17 -; SI-NEXT: v_writelane_b32 v41, s66, 18 -; SI-NEXT: v_writelane_b32 v41, s67, 19 -; SI-NEXT: v_writelane_b32 v41, s68, 20 -; SI-NEXT: v_writelane_b32 v41, s69, 21 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_writelane_b32 v20, s30, 0 +; SI-NEXT: v_writelane_b32 v20, s31, 1 +; SI-NEXT: v_writelane_b32 v20, s34, 2 +; SI-NEXT: v_writelane_b32 v20, s35, 3 +; SI-NEXT: v_writelane_b32 v20, s36, 4 +; SI-NEXT: v_writelane_b32 v20, s37, 5 +; SI-NEXT: v_writelane_b32 v20, s38, 6 +; SI-NEXT: v_writelane_b32 v20, s39, 7 +; SI-NEXT: v_writelane_b32 v20, s48, 8 +; SI-NEXT: v_writelane_b32 v20, s49, 9 +; SI-NEXT: v_writelane_b32 v20, s50, 10 +; SI-NEXT: v_writelane_b32 v20, s51, 11 +; SI-NEXT: v_writelane_b32 v20, s52, 12 +; SI-NEXT: v_writelane_b32 v20, s53, 13 +; SI-NEXT: v_writelane_b32 v20, s54, 14 +; SI-NEXT: v_writelane_b32 v20, s55, 15 +; SI-NEXT: v_writelane_b32 v20, s64, 16 +; SI-NEXT: v_writelane_b32 v20, s65, 17 +; SI-NEXT: v_writelane_b32 v20, s66, 18 +; SI-NEXT: v_writelane_b32 v20, s67, 19 +; SI-NEXT: v_writelane_b32 v20, s68, 20 +; SI-NEXT: v_writelane_b32 v20, s69, 21 +; SI-NEXT: v_writelane_b32 v20, s70, 22 +; SI-NEXT: v_writelane_b32 v20, s71, 23 +; SI-NEXT: v_writelane_b32 v20, s80, 24 +; SI-NEXT: v_writelane_b32 v20, s81, 25 +; SI-NEXT: v_writelane_b32 v20, s82, 26 +; SI-NEXT: v_writelane_b32 v20, s83, 27 +; SI-NEXT: v_writelane_b32 v20, s84, 28 +; SI-NEXT: v_writelane_b32 v20, s85, 29 +; SI-NEXT: v_writelane_b32 v20, s86, 30 +; SI-NEXT: v_writelane_b32 v20, s87, 31 +; SI-NEXT: v_writelane_b32 v20, s96, 32 +; SI-NEXT: 
v_writelane_b32 v20, s97, 33 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_writelane_b32 v41, s70, 22 -; SI-NEXT: v_readfirstlane_b32 s47, v1 -; SI-NEXT: v_readfirstlane_b32 s46, v2 -; SI-NEXT: v_readfirstlane_b32 s45, v3 -; SI-NEXT: v_readfirstlane_b32 s44, v4 -; SI-NEXT: v_readfirstlane_b32 s43, v5 -; SI-NEXT: v_readfirstlane_b32 s42, v6 -; SI-NEXT: v_readfirstlane_b32 s41, v7 -; SI-NEXT: v_readfirstlane_b32 s40, v8 -; SI-NEXT: v_readfirstlane_b32 s15, v9 -; SI-NEXT: v_readfirstlane_b32 s14, v10 -; SI-NEXT: v_readfirstlane_b32 s13, v11 -; SI-NEXT: v_readfirstlane_b32 s12, v12 -; SI-NEXT: v_readfirstlane_b32 s11, v13 -; SI-NEXT: v_readfirstlane_b32 s10, v14 -; SI-NEXT: v_readfirstlane_b32 s9, v15 -; SI-NEXT: v_readfirstlane_b32 s8, v16 -; SI-NEXT: v_readfirstlane_b32 s7, v17 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v18 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: v_writelane_b32 v41, s71, 23 +; SI-NEXT: v_writelane_b32 v20, s98, 34 +; SI-NEXT: v_readfirstlane_b32 s44, v1 +; SI-NEXT: v_readfirstlane_b32 s45, v2 +; SI-NEXT: v_readfirstlane_b32 s42, v3 +; SI-NEXT: v_readfirstlane_b32 s43, v4 +; SI-NEXT: v_readfirstlane_b32 s40, v5 +; SI-NEXT: v_readfirstlane_b32 s41, v6 +; SI-NEXT: v_readfirstlane_b32 s14, v7 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s10, v11 +; SI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: v_readfirstlane_b32 s8, v13 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: v_readfirstlane_b32 s6, v15 +; SI-NEXT: v_readfirstlane_b32 s7, v16 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: v_writelane_b32 v20, s99, 35 +; SI-NEXT: ; implicit-def: $vgpr22 : SGPR spill to VGPR lane +; SI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; 
SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v22, s45 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s9 -; SI-NEXT: v_mov_b32_e32 v9, s11 -; SI-NEXT: v_mov_b32_e32 v12, s13 -; SI-NEXT: v_mov_b32_e32 v15, s15 -; SI-NEXT: v_mov_b32_e32 v18, s41 -; SI-NEXT: v_mov_b32_e32 v21, s43 -; SI-NEXT: v_alignbit_b32 v24, s44, v22, 24 -; SI-NEXT: v_alignbit_b32 v25, s44, v22, 16 -; SI-NEXT: v_alignbit_b32 v26, s44, v22, 8 -; SI-NEXT: v_mov_b32_e32 v22, s47 -; SI-NEXT: v_mov_b32_e32 v23, s28 -; SI-NEXT: v_mov_b32_e32 v29, s26 -; SI-NEXT: v_mov_b32_e32 v35, s24 -; SI-NEXT: v_mov_b32_e32 v39, s22 -; SI-NEXT: v_mov_b32_e32 v50, s20 -; SI-NEXT: v_mov_b32_e32 v53, s18 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s8, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s8, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s8, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s10, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s10, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s10, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s12, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s12, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s12, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s14, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s14, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s14, v15, 8 -; SI-NEXT: v_alignbit_b32 v16, s40, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s40, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s40, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8 -; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24 -; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16 -; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8 -; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24 -; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16 -; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8 -; SI-NEXT: v_alignbit_b32 v27, s27, v29, 24 -; SI-NEXT: 
v_alignbit_b32 v28, s27, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, s27, v29, 8 -; SI-NEXT: v_alignbit_b32 v33, s25, v35, 24 -; SI-NEXT: v_alignbit_b32 v34, s25, v35, 16 -; SI-NEXT: v_alignbit_b32 v35, s25, v35, 8 -; SI-NEXT: v_alignbit_b32 v37, s23, v39, 24 -; SI-NEXT: v_alignbit_b32 v38, s23, v39, 16 -; SI-NEXT: v_alignbit_b32 v39, s23, v39, 8 -; SI-NEXT: v_alignbit_b32 v48, s21, v50, 24 -; SI-NEXT: v_alignbit_b32 v49, s21, v50, 16 -; SI-NEXT: v_alignbit_b32 v50, s21, v50, 8 -; SI-NEXT: v_alignbit_b32 v51, s19, v53, 24 -; SI-NEXT: v_alignbit_b32 v52, s19, v53, 16 -; SI-NEXT: v_alignbit_b32 v53, s19, v53, 8 -; SI-NEXT: v_alignbit_b32 v54, s17, v40, 24 -; SI-NEXT: v_alignbit_b32 v55, s17, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, s17, v40, 8 -; SI-NEXT: s_lshr_b32 s56, s6, 24 -; SI-NEXT: s_lshr_b32 s57, s6, 16 -; SI-NEXT: s_lshr_b32 s58, s6, 8 -; SI-NEXT: s_lshr_b32 s59, s8, 24 -; SI-NEXT: s_lshr_b32 s60, s8, 16 -; SI-NEXT: s_lshr_b32 s61, s8, 8 -; SI-NEXT: s_lshr_b32 s62, s10, 24 -; SI-NEXT: s_lshr_b32 s63, s10, 16 -; SI-NEXT: s_lshr_b32 s72, s10, 8 -; SI-NEXT: s_lshr_b32 s73, s12, 24 -; SI-NEXT: s_lshr_b32 s74, s12, 16 -; SI-NEXT: s_lshr_b32 s75, s12, 8 -; SI-NEXT: s_lshr_b32 s76, s14, 24 -; SI-NEXT: s_lshr_b32 s77, s14, 16 -; SI-NEXT: s_lshr_b32 s78, s14, 8 -; SI-NEXT: s_lshr_b32 s79, s40, 24 -; SI-NEXT: s_lshr_b32 s88, s40, 16 -; SI-NEXT: s_lshr_b32 s89, s40, 8 -; SI-NEXT: s_lshr_b32 s90, s42, 24 -; SI-NEXT: s_lshr_b32 s91, s42, 16 -; SI-NEXT: s_lshr_b32 s92, s42, 8 -; SI-NEXT: s_lshr_b32 s93, s44, 24 -; SI-NEXT: s_lshr_b32 s94, s44, 16 -; SI-NEXT: s_lshr_b32 s95, s44, 8 -; SI-NEXT: s_lshr_b32 s30, s46, 24 -; SI-NEXT: s_lshr_b32 s31, s46, 16 -; SI-NEXT: s_lshr_b32 s34, s46, 8 -; SI-NEXT: s_lshr_b32 s35, s29, 24 -; SI-NEXT: s_lshr_b32 s36, s29, 16 -; SI-NEXT: s_lshr_b32 s37, s29, 8 -; SI-NEXT: s_lshr_b32 s38, s27, 24 -; SI-NEXT: s_lshr_b32 s39, s27, 16 -; SI-NEXT: s_lshr_b32 s48, s27, 8 -; SI-NEXT: s_lshr_b32 s49, s25, 24 -; SI-NEXT: s_lshr_b32 s50, s25, 16 -; 
SI-NEXT: s_lshr_b32 s51, s25, 8 -; SI-NEXT: s_lshr_b32 s52, s23, 24 -; SI-NEXT: s_lshr_b32 s53, s23, 16 -; SI-NEXT: s_lshr_b32 s54, s23, 8 -; SI-NEXT: s_lshr_b32 s55, s21, 24 -; SI-NEXT: s_lshr_b32 s64, s21, 16 -; SI-NEXT: s_lshr_b32 s65, s21, 8 -; SI-NEXT: s_lshr_b32 s66, s19, 24 -; SI-NEXT: s_lshr_b32 s67, s19, 16 -; SI-NEXT: s_lshr_b32 s68, s19, 8 -; SI-NEXT: s_lshr_b32 s69, s17, 24 -; SI-NEXT: s_lshr_b32 s70, s17, 16 -; SI-NEXT: s_lshr_b32 s71, s17, 8 +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v22, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: 
v_writelane_b32 v22, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s43, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 40 +; SI-NEXT: v_writelane_b32 v22, s47, 41 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 38 +; SI-NEXT: v_writelane_b32 v22, s47, 39 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 36 +; SI-NEXT: v_writelane_b32 v22, s47, 37 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: 
v_writelane_b32 v22, s46, 34 +; SI-NEXT: v_writelane_b32 v22, s47, 35 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 32 +; SI-NEXT: v_writelane_b32 v22, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 30 +; SI-NEXT: v_writelane_b32 v22, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 28 +; SI-NEXT: v_writelane_b32 v22, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 26 +; SI-NEXT: v_writelane_b32 v22, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 24 +; SI-NEXT: v_writelane_b32 v22, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 22 +; SI-NEXT: v_writelane_b32 v22, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 20 +; SI-NEXT: v_writelane_b32 v22, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 18 +; SI-NEXT: v_writelane_b32 v22, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 16 +; SI-NEXT: v_writelane_b32 v22, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 14 +; SI-NEXT: v_writelane_b32 v22, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 12 +; SI-NEXT: v_writelane_b32 v22, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 10 +; SI-NEXT: v_writelane_b32 v22, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 6 +; SI-NEXT: v_writelane_b32 v22, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 4 +; SI-NEXT: v_writelane_b32 v22, s47, 5 +; SI-NEXT: s_lshr_b64 
s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 2 +; SI-NEXT: v_writelane_b32 v22, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 0 +; SI-NEXT: s_lshr_b32 s50, s9, 24 +; SI-NEXT: s_lshr_b32 s51, s21, 8 +; SI-NEXT: s_lshr_b32 s48, s19, 24 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s19, 8 +; SI-NEXT: s_lshr_b32 s54, s17, 24 +; SI-NEXT: s_lshr_b32 s55, s17, 16 +; SI-NEXT: s_lshr_b32 s49, s17, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 1 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 40 +; SI-NEXT: v_writelane_b32 v22, 
s47, 41 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 38 +; SI-NEXT: v_writelane_b32 v22, s47, 39 +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: s_add_i32 s7, s7, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: s_add_i32 s9, s9, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: s_add_i32 s11, s11, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: s_add_i32 s13, s13, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: s_add_i32 s15, s15, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: s_add_i32 s41, s41, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: s_add_i32 s43, s43, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 62 +; SI-NEXT: s_lshr_b32 s46, 
s43, 16 ; SI-NEXT: s_add_i32 s45, s45, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: s_add_i32 s29, s29, 3 +; SI-NEXT: v_writelane_b32 v21, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: s_add_i32 s27, s27, 3 +; SI-NEXT: v_writelane_b32 v21, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: v_writelane_b32 v21, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: v_writelane_b32 v21, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: v_writelane_b32 v21, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 8 +; SI-NEXT: s_add_i32 s6, s6, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 36 +; SI-NEXT: v_writelane_b32 v22, s47, 37 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 34 +; SI-NEXT: v_writelane_b32 v22, s47, 35 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 32 +; 
SI-NEXT: v_writelane_b32 v22, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 30 +; SI-NEXT: v_writelane_b32 v22, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 28 +; SI-NEXT: v_writelane_b32 v22, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 26 +; SI-NEXT: v_writelane_b32 v22, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 8 +; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 24 +; SI-NEXT: v_writelane_b32 v22, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 22 +; SI-NEXT: v_writelane_b32 v22, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 20 +; SI-NEXT: v_writelane_b32 v22, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 18 +; SI-NEXT: v_writelane_b32 v22, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 16 +; SI-NEXT: v_writelane_b32 v22, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 14 +; SI-NEXT: v_writelane_b32 v22, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 12 +; SI-NEXT: v_writelane_b32 v22, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 10 +; SI-NEXT: v_writelane_b32 v22, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: s_add_i32 s40, s40, 3 +; SI-NEXT: v_writelane_b32 v22, s46, 6 +; SI-NEXT: v_writelane_b32 v22, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 4 +; SI-NEXT: v_writelane_b32 v22, s47, 5 +; SI-NEXT: 
s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 2 +; SI-NEXT: v_writelane_b32 v22, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s47, s47, 3 ; SI-NEXT: s_add_i32 s44, s44, 3 -; SI-NEXT: s_add_i32 s43, s43, 3 -; SI-NEXT: s_add_i32 s41, s41, 3 -; SI-NEXT: s_add_i32 s15, s15, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: v_mov_b32_e32 v22, s45 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s46, s46, 3 ; SI-NEXT: s_add_i32 s42, s42, 3 -; SI-NEXT: s_add_i32 s40, s40, 3 -; SI-NEXT: s_add_i32 s14, s14, 3 -; SI-NEXT: s_add_i32 s12, s12, 3 -; SI-NEXT: s_add_i32 s10, s10, 3 -; SI-NEXT: s_add_i32 s8, s8, 3 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s9 -; SI-NEXT: v_mov_b32_e32 v9, s11 -; SI-NEXT: v_mov_b32_e32 v12, s13 -; SI-NEXT: v_mov_b32_e32 v15, s15 -; SI-NEXT: v_mov_b32_e32 v18, s41 -; SI-NEXT: v_mov_b32_e32 v21, s43 -; SI-NEXT: v_alignbit_b32 v24, s44, v22, 24 -; SI-NEXT: v_alignbit_b32 v25, s44, v22, 16 -; SI-NEXT: v_alignbit_b32 v26, s44, v22, 8 -; SI-NEXT: v_mov_b32_e32 v22, s47 -; SI-NEXT: v_mov_b32_e32 v23, s28 -; SI-NEXT: v_mov_b32_e32 v29, s26 -; SI-NEXT: v_mov_b32_e32 v35, s24 -; SI-NEXT: v_mov_b32_e32 v39, s22 -; SI-NEXT: v_mov_b32_e32 v50, s20 -; SI-NEXT: v_mov_b32_e32 v53, s18 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, s16 -; SI-NEXT: 
v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s8, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s8, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s8, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s10, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s10, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s10, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s12, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s12, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s12, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s14, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s14, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s14, v15, 8 -; SI-NEXT: v_alignbit_b32 v16, s40, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s40, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s40, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8 -; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24 -; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16 -; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8 -; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24 -; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16 -; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8 -; SI-NEXT: v_alignbit_b32 v27, s27, v29, 24 -; SI-NEXT: v_alignbit_b32 v28, s27, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, s27, v29, 8 -; SI-NEXT: v_alignbit_b32 v33, s25, v35, 24 -; SI-NEXT: v_alignbit_b32 v34, s25, v35, 16 -; SI-NEXT: v_alignbit_b32 v35, s25, v35, 8 -; SI-NEXT: v_alignbit_b32 v37, s23, v39, 24 -; SI-NEXT: v_alignbit_b32 v38, s23, v39, 16 -; SI-NEXT: v_alignbit_b32 v39, s23, v39, 8 -; SI-NEXT: v_alignbit_b32 v48, s21, v50, 24 -; SI-NEXT: v_alignbit_b32 v49, s21, v50, 16 -; SI-NEXT: v_alignbit_b32 v50, s21, v50, 8 -; SI-NEXT: v_alignbit_b32 v51, s19, v53, 24 -; SI-NEXT: v_alignbit_b32 v52, s19, v53, 16 -; SI-NEXT: v_alignbit_b32 v53, s19, v53, 8 -; SI-NEXT: v_alignbit_b32 v54, s17, v40, 24 -; SI-NEXT: v_alignbit_b32 v55, s17, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, s17, v40, 8 -; SI-NEXT: s_lshr_b32 
s56, s6, 24 -; SI-NEXT: s_lshr_b32 s57, s6, 16 -; SI-NEXT: s_lshr_b32 s58, s6, 8 -; SI-NEXT: s_lshr_b32 s59, s8, 24 -; SI-NEXT: s_lshr_b32 s60, s8, 16 -; SI-NEXT: s_lshr_b32 s61, s8, 8 -; SI-NEXT: s_lshr_b32 s62, s10, 24 -; SI-NEXT: s_lshr_b32 s63, s10, 16 -; SI-NEXT: s_lshr_b32 s72, s10, 8 -; SI-NEXT: s_lshr_b32 s73, s12, 24 -; SI-NEXT: s_lshr_b32 s74, s12, 16 -; SI-NEXT: s_lshr_b32 s75, s12, 8 -; SI-NEXT: s_lshr_b32 s76, s14, 24 -; SI-NEXT: s_lshr_b32 s77, s14, 16 -; SI-NEXT: s_lshr_b32 s78, s14, 8 -; SI-NEXT: s_lshr_b32 s79, s40, 24 -; SI-NEXT: s_lshr_b32 s88, s40, 16 -; SI-NEXT: s_lshr_b32 s89, s40, 8 -; SI-NEXT: s_lshr_b32 s90, s42, 24 -; SI-NEXT: s_lshr_b32 s91, s42, 16 -; SI-NEXT: s_lshr_b32 s92, s42, 8 -; SI-NEXT: s_lshr_b32 s93, s44, 24 -; SI-NEXT: s_lshr_b32 s94, s44, 16 -; SI-NEXT: s_lshr_b32 s95, s44, 8 -; SI-NEXT: s_lshr_b32 s30, s46, 24 -; SI-NEXT: s_lshr_b32 s31, s46, 16 -; SI-NEXT: s_lshr_b32 s34, s46, 8 -; SI-NEXT: s_lshr_b32 s35, s29, 24 -; SI-NEXT: s_lshr_b32 s36, s29, 16 -; SI-NEXT: s_lshr_b32 s37, s29, 8 -; SI-NEXT: s_lshr_b32 s38, s27, 24 -; SI-NEXT: s_lshr_b32 s39, s27, 16 -; SI-NEXT: s_lshr_b32 s48, s27, 8 -; SI-NEXT: s_lshr_b32 s49, s25, 24 -; SI-NEXT: s_lshr_b32 s50, s25, 16 -; SI-NEXT: s_lshr_b32 s51, s25, 8 -; SI-NEXT: s_lshr_b32 s52, s23, 24 -; SI-NEXT: s_lshr_b32 s53, s23, 16 -; SI-NEXT: s_lshr_b32 s54, s23, 8 -; SI-NEXT: s_lshr_b32 s55, s21, 24 -; SI-NEXT: s_lshr_b32 s64, s21, 16 -; SI-NEXT: s_lshr_b32 s65, s21, 8 -; SI-NEXT: s_lshr_b32 s66, s19, 24 -; SI-NEXT: s_lshr_b32 s67, s19, 16 -; SI-NEXT: s_lshr_b32 s68, s19, 8 -; SI-NEXT: s_lshr_b32 s69, s17, 24 -; SI-NEXT: s_lshr_b32 s70, s17, 16 -; SI-NEXT: s_lshr_b32 s71, s17, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 0 +; SI-NEXT: s_lshr_b32 s50, s9, 24 +; SI-NEXT: s_lshr_b32 s51, s21, 8 +; SI-NEXT: s_lshr_b32 s48, s19, 24 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s19, 8 +; SI-NEXT: s_lshr_b32 s54, s17, 24 +; SI-NEXT: s_lshr_b32 s55, s17, 16 +; SI-NEXT: s_lshr_b32 s49, 
s17, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 1 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v40 -; SI-NEXT: v_or_b32_e32 v40, s4, v40 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s71, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s70, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s69, 24 -; SI-NEXT: v_and_b32_e32 v55, 0xff, v55 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v55 -; SI-NEXT: v_lshlrev_b32_e32 v54, 24, v54 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v54, v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, s4 -; SI-NEXT: s_and_b32 s4, s18, 
0xff -; SI-NEXT: v_lshlrev_b32_e32 v53, 8, v53 -; SI-NEXT: v_or_b32_e32 v53, s4, v53 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s68, 8 -; SI-NEXT: v_and_b32_e32 v52, 0xff, v52 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s67, 0xff -; SI-NEXT: v_and_b32_e32 v40, 0xffff, v40 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v52 -; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v51 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s66, 24 -; SI-NEXT: v_or_b32_e32 v54, v40, v54 -; SI-NEXT: v_and_b32_e32 v53, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v51, v51, v52 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: buffer_store_dword v54, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v54, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v51, v53, v51 -; SI-NEXT: v_add_i32_e32 v52, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v55, v54, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v51, v52, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v52, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v50, 8, v50 -; SI-NEXT: v_or_b32_e32 v50, s4, v50 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s65, 8 -; SI-NEXT: v_and_b32_e32 v49, 0xff, v49 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s64, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v49 -; SI-NEXT: v_lshlrev_b32_e32 v48, 24, v48 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s55, 24 -; SI-NEXT: v_and_b32_e32 v50, 0xffff, v50 -; SI-NEXT: v_or_b32_e32 v48, v48, v49 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: s_lshl_b32 s47, s38, 8 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: s_and_b32 s47, s36, 0xff +; SI-NEXT: s_lshl_b32 s57, s34, 24 +; SI-NEXT: s_lshl_b32 s47, s47, 16 +; SI-NEXT: s_or_b32 s47, s57, s47 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: v_mov_b32_e32 
v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xff +; SI-NEXT: s_lshl_b32 s17, s49, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s55, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s47, s54, 24 +; SI-NEXT: s_or_b32 s17, s47, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s30, 8 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s94, 0xff +; SI-NEXT: s_lshl_b32 s18, s92, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xff +; SI-NEXT: s_lshl_b32 s17, s53, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s52, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s48, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s90, 8 +; SI-NEXT: s_and_b32 s17, s20, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s88, 0xff +; SI-NEXT: s_lshl_b32 s18, s78, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xff +; SI-NEXT: s_lshl_b32 s17, s51, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 17 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 16 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v6, s16 +; SI-NEXT: s_lshl_b32 s16, s76, 8 +; SI-NEXT: s_and_b32 s17, s22, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; 
SI-NEXT: s_and_b32 s17, s74, 0xff +; SI-NEXT: s_lshl_b32 s18, s72, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 15 +; SI-NEXT: v_mov_b32_e32 v7, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 14 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 13 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v8, s16 +; SI-NEXT: s_lshl_b32 s16, s62, 8 +; SI-NEXT: s_and_b32 s17, s24, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s60, 0xff +; SI-NEXT: s_lshl_b32 s18, s58, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 12 +; SI-NEXT: v_mov_b32_e32 v9, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 11 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 10 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v10, s16 +; SI-NEXT: s_lshl_b32 s16, s56, 8 +; SI-NEXT: s_and_b32 s17, s26, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s46, 0xff +; SI-NEXT: s_lshl_b32 s18, s98, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 9 +; SI-NEXT: v_mov_b32_e32 v11, s16 +; SI-NEXT: s_and_b32 s16, s27, 
0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 8 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 7 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v12, s16 +; SI-NEXT: s_lshl_b32 s16, s96, 8 +; SI-NEXT: s_and_b32 s17, s28, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s86, 0xff +; SI-NEXT: s_lshl_b32 s18, s84, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 6 +; SI-NEXT: v_mov_b32_e32 v13, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 5 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 4 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v14, s16 +; SI-NEXT: s_lshl_b32 s16, s82, 8 +; SI-NEXT: s_and_b32 s17, s44, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s80, 0xff +; SI-NEXT: s_lshl_b32 s18, s70, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 3 +; SI-NEXT: v_mov_b32_e32 v15, s16 +; SI-NEXT: s_and_b32 s16, s45, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 2 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v51, vcc, 12, v0 -; 
SI-NEXT: v_or_b32_e32 v48, v50, v48 -; SI-NEXT: v_add_i32_e32 v49, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v52, v51, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v48, v49, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v49, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v39, 8, v39 -; SI-NEXT: v_or_b32_e32 v39, s4, v39 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s54, 8 -; SI-NEXT: v_and_b32_e32 v38, 0xff, v38 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s53, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v38 -; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v37 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s52, 24 -; SI-NEXT: v_and_b32_e32 v39, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v37, v37, v38 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: v_mov_b32_e32 v16, s16 +; SI-NEXT: s_lshl_b32 s16, s68, 8 +; SI-NEXT: s_and_b32 s17, s42, 0xff +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s66, 0xff +; SI-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_lshl_b32 s18, s64, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: buffer_store_dword v7, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; 
SI-NEXT: buffer_store_dword v8, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v9, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: v_readlane_b32 s17, v21, 0 +; SI-NEXT: buffer_store_dword v10, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: buffer_store_dword v11, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 63 +; SI-NEXT: buffer_store_dword v12, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 62 +; SI-NEXT: buffer_store_dword v13, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: buffer_store_dword v14, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v22, 0 +; SI-NEXT: buffer_store_dword v15, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: v_readlane_b32 s19, v22, 1 +; SI-NEXT: buffer_store_dword v16, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_lshl_b32 s17, s18, 8 +; SI-NEXT: v_readlane_b32 s18, v22, 2 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v48, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v37, v39, v37 -; SI-NEXT: v_add_i32_e32 v38, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v49, v48, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v37, v38, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v38, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 
v35, 8, v35 -; SI-NEXT: v_or_b32_e32 v35, s4, v35 -; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s51, 8 -; SI-NEXT: v_and_b32_e32 v34, 0xff, v34 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s50, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34 -; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v33 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s49, 24 -; SI-NEXT: v_and_b32_e32 v35, 0xffff, v35 -; SI-NEXT: v_or_b32_e32 v33, v33, v34 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xff +; SI-NEXT: v_readlane_b32 s19, v22, 3 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 4 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 61 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v37, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v33, v35, v33 -; SI-NEXT: v_add_i32_e32 v34, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v38, v37, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v33, v34, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v34, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v29, 8, v29 -; SI-NEXT: v_or_b32_e32 v29, s4, v29 -; SI-NEXT: s_and_b32 s4, s27, 0xff -; SI-NEXT: s_lshl_b32 s5, s48, 8 -; SI-NEXT: v_and_b32_e32 v28, 0xff, v28 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s39, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s38, 24 -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: 
s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 60 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 59 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v33, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v27, v29, v27 -; SI-NEXT: v_add_i32_e32 v28, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v34, v33, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v28, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; SI-NEXT: v_or_b32_e32 v23, s4, v23 -; SI-NEXT: s_and_b32 s4, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s37, 8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: v_readlane_b32 s16, v22, 6 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s17, v22, 7 +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: v_readlane_b32 s19, v22, 5 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: v_readlane_b32 s16, v22, 8 +; SI-NEXT: v_readlane_b32 s17, v22, 9 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 10 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s18, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v27, vcc, 44, v0 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s36, 0xff -; SI-NEXT: buffer_store_dword v28, v27, 
s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v36 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s35, 24 -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 -; SI-NEXT: v_or_b32_e32 v22, v27, v22 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v23, v22 -; SI-NEXT: v_add_i32_e32 v23, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: v_readlane_b32 s15, v22, 58 +; SI-NEXT: s_lshl_b32 s15, s15, 8 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: v_readlane_b32 s15, v22, 57 +; SI-NEXT: s_and_b32 s15, s15, 0xff +; SI-NEXT: v_readlane_b32 s16, v22, 56 +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_lshl_b32 s16, s16, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s47, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v32 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s46, 0xff -; SI-NEXT: s_lshl_b32 s5, s34, 8 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: v_readlane_b32 s14, v22, 12 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s15, v22, 13 +; SI-NEXT: s_lshl_b32 s14, s14, 8 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: v_readlane_b32 s14, v22, 14 +; SI-NEXT: v_readlane_b32 s15, v22, 15 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s16, v22, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s16, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; 
SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xff, v31 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s31, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v30 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s30, 24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_or_b32_e32 v23, v27, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: v_readlane_b32 s13, v22, 55 +; SI-NEXT: s_lshl_b32 s13, s13, 8 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_readlane_b32 s13, v22, 54 +; SI-NEXT: s_and_b32 s13, s13, 0xff +; SI-NEXT: v_readlane_b32 s14, v22, 53 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 60, v0 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s45, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v26 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s44, 0xff -; SI-NEXT: s_lshl_b32 s5, s95, 8 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: v_readlane_b32 s12, v22, 18 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s13, v22, 19 +; SI-NEXT: s_lshl_b32 s12, s12, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: v_readlane_b32 s12, v22, 20 +; SI-NEXT: v_readlane_b32 s13, v22, 21 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 
s14, v22, 22 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s14, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xff, v25 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s94, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v24 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s93, 24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_or_b32_e32 v23, v24, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_or_b32_e32 v21, s4, v21 -; SI-NEXT: s_and_b32 s4, s42, 0xff -; SI-NEXT: s_lshl_b32 s5, s92, 8 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s91, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s90, 24 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: v_readlane_b32 s11, v22, 52 +; SI-NEXT: s_lshl_b32 s11, s11, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: v_readlane_b32 s11, v22, 51 +; SI-NEXT: s_and_b32 s11, s11, 0xff +; SI-NEXT: v_readlane_b32 s12, v22, 50 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s12, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; 
SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v19, v21, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v20, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 -; SI-NEXT: v_or_b32_e32 v18, s4, v18 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s5, s89, 8 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s88, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s79, 24 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: v_readlane_b32 s10, v22, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s11, v22, 25 +; SI-NEXT: s_lshl_b32 s10, s10, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: v_readlane_b32 s10, v22, 26 +; SI-NEXT: v_readlane_b32 s11, v22, 27 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s12, v22, 28 +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s12, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v19, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v16, v18, v16 -; SI-NEXT: v_add_i32_e32 v17, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v20, v19, s[0:3], 0 offen -; SI-NEXT: 
buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_lshl_b32 s5, s78, 8 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s77, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s76, 24 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: v_readlane_b32 s9, v22, 49 +; SI-NEXT: s_lshl_b32 s9, s9, 8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_readlane_b32 s9, v22, 48 +; SI-NEXT: s_and_b32 s9, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s50, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s12, 0xff -; SI-NEXT: s_lshl_b32 s5, s75, 8 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s74, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s73, 24 -; 
SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_readlane_b32 s8, v22, 30 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: v_readlane_b32 s9, v22, 31 +; SI-NEXT: s_lshl_b32 s8, s8, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: v_readlane_b32 s8, v22, 32 +; SI-NEXT: v_readlane_b32 s9, v22, 33 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s10, v22, 34 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s10, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s5, s72, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s63, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s10, s62, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s10, s5 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: v_readlane_b32 s7, v22, 47 +; SI-NEXT: s_lshl_b32 s7, s7, 8 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_readlane_b32 s7, v22, 46 +; SI-NEXT: s_and_b32 s7, s7, 0xff +; SI-NEXT: v_readlane_b32 s8, v22, 45 +; 
SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s8, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s61, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s60, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s8, s59, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: v_readlane_b32 s6, v22, 36 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: v_readlane_b32 s7, v22, 37 +; SI-NEXT: s_lshl_b32 s6, s6, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: v_readlane_b32 s6, v22, 38 +; SI-NEXT: v_readlane_b32 s7, v22, 39 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: v_readlane_b32 s8, v22, 40 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s8, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, 
v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s6, 0xff -; SI-NEXT: s_lshl_b32 s5, s58, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: v_readlane_b32 s5, v22, 44 +; SI-NEXT: s_lshl_b32 s5, s5, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s57, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: v_readlane_b32 s5, v22, 43 +; SI-NEXT: s_and_b32 s5, s5, 0xff +; SI-NEXT: v_readlane_b32 s6, v22, 42 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s56, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s6, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x74, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_readlane_b32 s19, v22, 11 +; SI-NEXT: v_readlane_b32 s17, v22, 17 +; SI-NEXT: v_readlane_b32 s15, v22, 23 +; SI-NEXT: v_readlane_b32 s13, v22, 29 +; SI-NEXT: v_readlane_b32 s11, v22, 35 +; SI-NEXT: v_readlane_b32 s9, v22, 41 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s71, v41, 23 -; SI-NEXT: v_readlane_b32 s70, v41, 22 -; SI-NEXT: v_readlane_b32 s69, v41, 21 -; SI-NEXT: v_readlane_b32 s68, 
v41, 20 -; SI-NEXT: v_readlane_b32 s67, v41, 19 -; SI-NEXT: v_readlane_b32 s66, v41, 18 -; SI-NEXT: v_readlane_b32 s65, v41, 17 -; SI-NEXT: v_readlane_b32 s64, v41, 16 -; SI-NEXT: v_readlane_b32 s55, v41, 15 -; SI-NEXT: v_readlane_b32 s54, v41, 14 -; SI-NEXT: v_readlane_b32 s53, v41, 13 -; SI-NEXT: v_readlane_b32 s52, v41, 12 -; SI-NEXT: v_readlane_b32 s51, v41, 11 -; SI-NEXT: v_readlane_b32 s50, v41, 10 -; SI-NEXT: v_readlane_b32 s49, v41, 9 -; SI-NEXT: v_readlane_b32 s48, v41, 8 -; SI-NEXT: v_readlane_b32 s39, v41, 7 -; SI-NEXT: v_readlane_b32 s38, v41, 6 -; SI-NEXT: v_readlane_b32 s37, v41, 5 -; SI-NEXT: v_readlane_b32 s36, v41, 4 -; SI-NEXT: v_readlane_b32 s35, v41, 3 -; SI-NEXT: v_readlane_b32 s34, v41, 2 -; SI-NEXT: v_readlane_b32 s31, v41, 1 -; SI-NEXT: v_readlane_b32 s30, v41, 0 -; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s99, v20, 35 +; SI-NEXT: v_readlane_b32 s98, v20, 34 +; SI-NEXT: v_readlane_b32 s97, v20, 33 +; SI-NEXT: v_readlane_b32 s96, v20, 32 +; SI-NEXT: v_readlane_b32 s87, v20, 31 +; SI-NEXT: v_readlane_b32 s86, v20, 30 +; SI-NEXT: v_readlane_b32 s85, v20, 29 +; SI-NEXT: v_readlane_b32 s84, v20, 28 +; SI-NEXT: v_readlane_b32 s83, v20, 27 +; SI-NEXT: v_readlane_b32 s82, v20, 26 +; SI-NEXT: v_readlane_b32 s81, v20, 25 +; SI-NEXT: v_readlane_b32 s80, v20, 24 +; SI-NEXT: v_readlane_b32 s71, v20, 23 +; SI-NEXT: v_readlane_b32 s70, v20, 22 +; SI-NEXT: v_readlane_b32 s69, v20, 21 +; SI-NEXT: v_readlane_b32 s68, v20, 20 +; SI-NEXT: v_readlane_b32 s67, v20, 19 +; SI-NEXT: v_readlane_b32 s66, v20, 18 +; SI-NEXT: v_readlane_b32 s65, v20, 17 +; SI-NEXT: v_readlane_b32 s64, v20, 16 +; SI-NEXT: v_readlane_b32 s55, v20, 15 +; SI-NEXT: v_readlane_b32 s54, v20, 14 +; SI-NEXT: v_readlane_b32 s53, v20, 13 +; SI-NEXT: v_readlane_b32 s52, v20, 12 +; SI-NEXT: v_readlane_b32 s51, v20, 11 +; SI-NEXT: v_readlane_b32 s50, v20, 10 +; SI-NEXT: v_readlane_b32 s49, v20, 
9 +; SI-NEXT: v_readlane_b32 s48, v20, 8 +; SI-NEXT: v_readlane_b32 s39, v20, 7 +; SI-NEXT: v_readlane_b32 s38, v20, 6 +; SI-NEXT: v_readlane_b32 s37, v20, 5 +; SI-NEXT: v_readlane_b32 s36, v20, 4 +; SI-NEXT: v_readlane_b32 s35, v20, 3 +; SI-NEXT: v_readlane_b32 s34, v20, 2 +; SI-NEXT: v_readlane_b32 s31, v20, 1 +; SI-NEXT: v_readlane_b32 s30, v20, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $sgpr71 -; SI-NEXT: ; implicit-def: $sgpr70 -; SI-NEXT: ; implicit-def: $sgpr69 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $sgpr68 -; SI-NEXT: ; implicit-def: $sgpr67 -; SI-NEXT: ; implicit-def: $sgpr66 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $sgpr65 -; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v22, s50, 0 +; SI-NEXT: v_writelane_b32 v22, s51, 1 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 2 +; SI-NEXT: v_writelane_b32 v22, s51, 3 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; 
SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 4 +; SI-NEXT: v_writelane_b32 v22, s51, 5 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 6 +; SI-NEXT: v_writelane_b32 v22, s51, 7 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 8 +; SI-NEXT: v_writelane_b32 v22, s51, 9 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 10 +; SI-NEXT: v_writelane_b32 v22, s51, 11 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 12 +; SI-NEXT: v_writelane_b32 v22, s51, 13 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 14 +; SI-NEXT: v_writelane_b32 v22, s51, 15 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 16 +; SI-NEXT: v_writelane_b32 v22, s51, 17 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 18 +; SI-NEXT: v_writelane_b32 v22, s51, 19 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 
+; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 20 +; SI-NEXT: v_writelane_b32 v22, s51, 21 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 22 +; SI-NEXT: v_writelane_b32 v22, s51, 23 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 24 +; SI-NEXT: v_writelane_b32 v22, s51, 25 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 26 +; SI-NEXT: v_writelane_b32 v22, s51, 27 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 28 +; SI-NEXT: v_writelane_b32 v22, s51, 29 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 30 +; SI-NEXT: v_writelane_b32 v22, s51, 31 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 32 +; SI-NEXT: v_writelane_b32 v22, s51, 33 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 34 +; SI-NEXT: v_writelane_b32 
v22, s51, 35 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 36 +; SI-NEXT: v_writelane_b32 v22, s51, 37 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 38 +; SI-NEXT: v_writelane_b32 v22, s51, 39 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr49 ; SI-NEXT: ; implicit-def: $sgpr55 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $sgpr54 ; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $sgpr52 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $sgpr51 -; SI-NEXT: ; implicit-def: $sgpr50 -; SI-NEXT: ; implicit-def: $sgpr49 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $sgpr48 -; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; kill: killed $sgpr46 ; SI-NEXT: ; implicit-def: $sgpr38 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr36 -; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr34 -; SI-NEXT: ; implicit-def: $sgpr31 ; SI-NEXT: ; implicit-def: $sgpr30 -; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr94 -; SI-NEXT: ; implicit-def: $sgpr93 ; SI-NEXT: ; implicit-def: $sgpr92 -; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr90 -; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr88 -; SI-NEXT: ; implicit-def: $sgpr79 ; 
SI-NEXT: ; implicit-def: $sgpr78 -; SI-NEXT: ; implicit-def: $sgpr77 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $sgpr73 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $sgpr63 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s50, 40 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: v_writelane_b32 v22, 
s51, 41 +; SI-NEXT: ; implicit-def: $sgpr50 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v32i32_to_v128i8_scalar: @@ -34454,385 +34790,431 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v32i32_to_v64i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v20, s30, 0 +; SI-NEXT: v_writelane_b32 v20, s31, 1 +; SI-NEXT: v_writelane_b32 v20, s34, 2 +; SI-NEXT: v_writelane_b32 v20, s35, 3 +; SI-NEXT: v_writelane_b32 v20, s36, 4 +; SI-NEXT: v_writelane_b32 v20, s37, 5 +; SI-NEXT: v_writelane_b32 v20, s38, 6 +; SI-NEXT: v_writelane_b32 v20, s39, 7 +; SI-NEXT: v_writelane_b32 v20, s48, 8 +; SI-NEXT: v_writelane_b32 v20, s49, 9 +; SI-NEXT: v_writelane_b32 v20, s50, 10 +; SI-NEXT: v_writelane_b32 v20, s51, 11 +; SI-NEXT: v_writelane_b32 v20, s52, 12 +; SI-NEXT: v_writelane_b32 v20, s53, 13 +; SI-NEXT: v_writelane_b32 v20, s54, 14 +; SI-NEXT: v_writelane_b32 v20, s55, 15 +; SI-NEXT: v_writelane_b32 v20, s64, 16 +; SI-NEXT: v_writelane_b32 v20, s65, 17 +; SI-NEXT: v_writelane_b32 v20, s66, 18 +; SI-NEXT: v_writelane_b32 v20, s67, 19 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_readfirstlane_b32 s47, v1 -; SI-NEXT: v_readfirstlane_b32 s46, v2 -; SI-NEXT: v_readfirstlane_b32 s45, v3 -; SI-NEXT: v_readfirstlane_b32 s44, v4 -; SI-NEXT: v_readfirstlane_b32 s43, v5 -; SI-NEXT: v_readfirstlane_b32 s42, v6 -; SI-NEXT: v_readfirstlane_b32 s41, v7 -; SI-NEXT: v_readfirstlane_b32 s40, v8 -; SI-NEXT: v_readfirstlane_b32 s15, v9 -; SI-NEXT: v_readfirstlane_b32 s14, v10 -; SI-NEXT: v_readfirstlane_b32 s13, v11 -; SI-NEXT: v_readfirstlane_b32 s12, v12 -; SI-NEXT: v_readfirstlane_b32 s11, v13 -; SI-NEXT: v_readfirstlane_b32 s10, v14 -; SI-NEXT: v_readfirstlane_b32 s9, v15 -; SI-NEXT: 
v_readfirstlane_b32 s8, v16 -; SI-NEXT: v_readfirstlane_b32 s7, v17 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v18 +; SI-NEXT: v_writelane_b32 v20, s68, 20 +; SI-NEXT: v_readfirstlane_b32 s44, v1 +; SI-NEXT: v_readfirstlane_b32 s45, v2 +; SI-NEXT: v_readfirstlane_b32 s42, v3 +; SI-NEXT: v_readfirstlane_b32 s43, v4 +; SI-NEXT: v_readfirstlane_b32 s40, v5 +; SI-NEXT: v_readfirstlane_b32 s41, v6 +; SI-NEXT: v_readfirstlane_b32 s14, v7 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s10, v11 +; SI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: v_readfirstlane_b32 s8, v13 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: v_readfirstlane_b32 s6, v15 +; SI-NEXT: v_readfirstlane_b32 s7, v16 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: v_writelane_b32 v20, s69, 21 ; SI-NEXT: s_cbranch_scc0 .LBB25_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s47 -; SI-NEXT: v_mov_b32_e32 v10, s28 -; SI-NEXT: v_mov_b32_e32 v11, s26 -; SI-NEXT: v_mov_b32_e32 v12, s24 -; SI-NEXT: v_mov_b32_e32 v13, s22 -; SI-NEXT: v_mov_b32_e32 v14, s20 -; SI-NEXT: v_mov_b32_e32 v15, s18 -; SI-NEXT: v_mov_b32_e32 v16, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s46, v9, 16 -; 
SI-NEXT: v_alignbit_b32 v10, s29, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s27, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s23, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s21, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s19, v15, 16 -; SI-NEXT: v_alignbit_b32 v16, s17, v16, 16 -; SI-NEXT: s_lshr_b32 s56, s6, 16 -; SI-NEXT: s_lshr_b32 s57, s8, 16 -; SI-NEXT: s_lshr_b32 s58, s10, 16 -; SI-NEXT: s_lshr_b32 s59, s12, 16 -; SI-NEXT: s_lshr_b32 s60, s14, 16 -; SI-NEXT: s_lshr_b32 s61, s40, 16 -; SI-NEXT: s_lshr_b32 s62, s42, 16 -; SI-NEXT: s_lshr_b32 s63, s44, 16 -; SI-NEXT: s_lshr_b32 s72, s46, 16 -; SI-NEXT: s_lshr_b32 s73, s29, 16 -; SI-NEXT: s_lshr_b32 s74, s27, 16 -; SI-NEXT: s_lshr_b32 s75, s25, 16 -; SI-NEXT: s_lshr_b32 s76, s23, 16 -; SI-NEXT: s_lshr_b32 s77, s21, 16 -; SI-NEXT: s_lshr_b32 s78, s19, 16 -; SI-NEXT: s_lshr_b32 s79, s17, 16 +; SI-NEXT: s_lshr_b32 s38, s5, 16 +; SI-NEXT: s_lshr_b32 s39, s7, 16 +; SI-NEXT: s_lshr_b32 s48, s9, 16 +; SI-NEXT: s_lshr_b32 s49, s11, 16 +; SI-NEXT: s_lshr_b32 s50, s13, 16 +; SI-NEXT: s_lshr_b32 s51, s15, 16 +; SI-NEXT: s_lshr_b32 s52, s41, 16 +; SI-NEXT: s_lshr_b32 s53, s43, 16 +; SI-NEXT: s_lshr_b32 s54, s45, 16 +; SI-NEXT: s_lshr_b32 s55, s29, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 16 +; SI-NEXT: s_lshr_b32 s66, s23, 16 +; SI-NEXT: s_lshr_b32 s67, s21, 16 +; SI-NEXT: s_lshr_b32 s68, s19, 16 +; SI-NEXT: s_lshr_b32 s69, s17, 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[92:93], 
s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB25_3 ; SI-NEXT: .LBB25_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s47, s47, 3 ; SI-NEXT: s_add_i32 s45, s45, 3 -; SI-NEXT: s_add_i32 s43, s43, 3 -; SI-NEXT: s_add_i32 s41, s41, 3 -; SI-NEXT: s_add_i32 s15, s15, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s46, s46, 3 ; SI-NEXT: s_add_i32 s44, s44, 3 +; SI-NEXT: s_add_i32 s43, s43, 3 ; SI-NEXT: s_add_i32 s42, s42, 3 +; SI-NEXT: s_add_i32 s41, s41, 3 ; SI-NEXT: s_add_i32 s40, s40, 3 +; SI-NEXT: s_add_i32 s15, s15, 3 ; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: s_add_i32 s13, s13, 3 ; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 
v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s47 -; SI-NEXT: v_mov_b32_e32 v10, s28 -; SI-NEXT: v_mov_b32_e32 v11, s26 -; SI-NEXT: v_mov_b32_e32 v12, s24 -; SI-NEXT: v_mov_b32_e32 v13, s22 -; SI-NEXT: v_mov_b32_e32 v14, s20 -; SI-NEXT: v_mov_b32_e32 v15, s18 -; SI-NEXT: v_mov_b32_e32 v16, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s46, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s29, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s27, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s23, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s21, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s19, v15, 16 -; SI-NEXT: v_alignbit_b32 v16, s17, v16, 16 -; SI-NEXT: s_lshr_b32 s56, s6, 16 -; SI-NEXT: s_lshr_b32 s57, s8, 16 -; SI-NEXT: s_lshr_b32 s58, s10, 16 -; SI-NEXT: s_lshr_b32 s59, s12, 16 -; SI-NEXT: s_lshr_b32 s60, s14, 16 -; SI-NEXT: s_lshr_b32 s61, s40, 16 -; SI-NEXT: s_lshr_b32 s62, s42, 16 -; SI-NEXT: s_lshr_b32 s63, s44, 16 -; SI-NEXT: s_lshr_b32 s72, s46, 16 -; SI-NEXT: s_lshr_b32 s73, s29, 16 -; SI-NEXT: s_lshr_b32 s74, s27, 16 -; SI-NEXT: s_lshr_b32 s75, s25, 16 -; SI-NEXT: s_lshr_b32 s76, s23, 16 -; SI-NEXT: s_lshr_b32 s77, s21, 16 -; SI-NEXT: s_lshr_b32 s78, s19, 16 -; SI-NEXT: s_lshr_b32 s79, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[74:75], 
s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[28:29], 16 +; SI-NEXT: s_lshr_b32 s38, s5, 16 +; SI-NEXT: s_lshr_b32 s39, s7, 16 +; SI-NEXT: s_lshr_b32 s48, s9, 16 +; SI-NEXT: s_lshr_b32 s49, s11, 16 +; SI-NEXT: s_lshr_b32 s50, s13, 16 +; SI-NEXT: s_lshr_b32 s51, s15, 16 +; SI-NEXT: s_lshr_b32 s52, s41, 16 +; SI-NEXT: s_lshr_b32 s53, s43, 16 +; SI-NEXT: s_lshr_b32 s54, s45, 16 +; SI-NEXT: s_lshr_b32 s55, s29, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 16 +; SI-NEXT: s_lshr_b32 s66, s23, 16 +; SI-NEXT: s_lshr_b32 s67, s21, 16 +; SI-NEXT: s_lshr_b32 s68, s19, 16 +; SI-NEXT: s_lshr_b32 s69, s17, 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 ; SI-NEXT: .LBB25_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_or_b32_e32 v16, s4, v16 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s79, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: buffer_store_dword v16, v0, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s47, s36, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s69, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s34, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s68, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 
+; SI-NEXT: s_lshl_b32 s16, s30, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s78, 16 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v16, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v16, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s67, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s94, 16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s77, 16 -; SI-NEXT: buffer_store_dword v16, v15, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v15, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 
s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s66, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s75, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s65, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, 
v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s47, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s64, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s46, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s45, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s44, 0xffff -; SI-NEXT: s_lshl_b32 
s5, s63, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s55, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s44, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s45, 0xffff +; SI-NEXT: s_lshl_b32 s17, s54, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s42, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff 
+; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s51, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) 
+; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, 
s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s69, v20, 21 +; SI-NEXT: v_readlane_b32 s68, v20, 20 +; SI-NEXT: v_readlane_b32 s67, v20, 19 +; SI-NEXT: v_readlane_b32 s66, v20, 18 +; SI-NEXT: v_readlane_b32 s65, v20, 17 +; SI-NEXT: v_readlane_b32 s64, v20, 16 +; SI-NEXT: v_readlane_b32 s55, v20, 15 +; SI-NEXT: v_readlane_b32 s54, v20, 14 +; SI-NEXT: v_readlane_b32 s53, v20, 13 +; SI-NEXT: v_readlane_b32 s52, v20, 12 +; SI-NEXT: v_readlane_b32 s51, v20, 11 +; SI-NEXT: v_readlane_b32 s50, v20, 10 +; SI-NEXT: v_readlane_b32 s49, v20, 9 +; SI-NEXT: v_readlane_b32 s48, v20, 8 +; SI-NEXT: v_readlane_b32 s39, v20, 7 +; SI-NEXT: v_readlane_b32 s38, v20, 6 +; SI-NEXT: v_readlane_b32 s37, v20, 5 +; SI-NEXT: v_readlane_b32 s36, v20, 4 +; SI-NEXT: v_readlane_b32 s35, v20, 3 +; SI-NEXT: v_readlane_b32 s34, v20, 2 +; SI-NEXT: v_readlane_b32 s31, v20, 1 +; SI-NEXT: v_readlane_b32 s30, v20, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB25_4: -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr79 -; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr68 +; 
SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr55 ; SI-NEXT: ; implicit-def: $sgpr78 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr77 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr54 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr75 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr52 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr50 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr46 ; SI-NEXT: s_branch .LBB25_2 ; ; VI-LABEL: bitcast_v32i32_to_v64i16_scalar: @@ -43268,887 +43650,1713 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a, ; SI-LABEL: bitcast_v32f32_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_writelane_b32 v63, s30, 0 +; SI-NEXT: v_writelane_b32 v63, s31, 1 +; SI-NEXT: v_writelane_b32 v63, s34, 2 +; SI-NEXT: v_writelane_b32 v63, s35, 3 +; SI-NEXT: v_writelane_b32 v63, s36, 4 +; SI-NEXT: v_writelane_b32 v63, s37, 5 +; SI-NEXT: v_writelane_b32 v63, s38, 6 +; SI-NEXT: v_writelane_b32 v63, s39, 7 +; SI-NEXT: v_writelane_b32 v63, s48, 8 +; SI-NEXT: v_writelane_b32 v63, s49, 9 +; SI-NEXT: v_writelane_b32 v63, s50, 10 +; SI-NEXT: v_writelane_b32 v63, s51, 11 +; SI-NEXT: v_writelane_b32 v63, s52, 12 +; SI-NEXT: v_writelane_b32 v63, s53, 13 +; SI-NEXT: v_writelane_b32 v63, s54, 14 +; SI-NEXT: v_writelane_b32 v63, s55, 15 +; SI-NEXT: v_writelane_b32 v63, s64, 16 +; SI-NEXT: v_writelane_b32 v63, s65, 17 +; SI-NEXT: v_writelane_b32 v63, s66, 18 +; SI-NEXT: v_writelane_b32 v63, s67, 19 +; SI-NEXT: v_writelane_b32 v63, s68, 20 +; SI-NEXT: v_writelane_b32 v63, s69, 21 +; SI-NEXT: v_writelane_b32 v63, s70, 22 +; SI-NEXT: v_writelane_b32 v63, s71, 23 +; SI-NEXT: v_writelane_b32 v63, s80, 24 +; SI-NEXT: v_writelane_b32 v63, s81, 25 +; SI-NEXT: v_writelane_b32 v63, s82, 26 +; SI-NEXT: v_writelane_b32 v63, s83, 27 +; SI-NEXT: v_writelane_b32 v63, s84, 28 +; SI-NEXT: v_writelane_b32 v63, s85, 29 +; SI-NEXT: v_writelane_b32 v63, s86, 30 +; SI-NEXT: v_writelane_b32 v63, s87, 31 +; SI-NEXT: v_writelane_b32 v63, s96, 32 +; SI-NEXT: v_writelane_b32 v63, s97, 33 +; SI-NEXT: v_writelane_b32 v63, s98, 34 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, 
s[0:3], s32 offset:56 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v56, s16 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v47, s17 -; SI-NEXT: v_mov_b32_e32 v44, s18 -; SI-NEXT: v_mov_b32_e32 v42, s19 -; SI-NEXT: v_mov_b32_e32 v40, s20 -; SI-NEXT: v_mov_b32_e32 v53, s21 -; SI-NEXT: v_mov_b32_e32 v51, s22 -; SI-NEXT: v_mov_b32_e32 v48, s23 -; SI-NEXT: v_mov_b32_e32 v38, s24 -; SI-NEXT: v_mov_b32_e32 v35, s25 -; SI-NEXT: v_mov_b32_e32 v33, s26 -; SI-NEXT: v_mov_b32_e32 v30, s27 -; SI-NEXT: v_mov_b32_e32 v28, s28 -; SI-NEXT: v_mov_b32_e32 v25, s29 -; SI-NEXT: s_cbranch_scc0 .LBB37_4 +; SI-NEXT: v_writelane_b32 v63, s99, 35 +; SI-NEXT: v_readfirstlane_b32 s44, v1 +; SI-NEXT: v_readfirstlane_b32 s45, v2 +; SI-NEXT: v_readfirstlane_b32 s42, v3 +; SI-NEXT: v_readfirstlane_b32 s43, v4 +; 
SI-NEXT: v_readfirstlane_b32 s40, v5 +; SI-NEXT: v_readfirstlane_b32 s41, v6 +; SI-NEXT: v_readfirstlane_b32 s14, v7 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s10, v11 +; SI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: v_readfirstlane_b32 s8, v13 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: v_readfirstlane_b32 s6, v15 +; SI-NEXT: v_readfirstlane_b32 s7, v16 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr61 : SGPR spill to VGPR lane +; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane +; SI-NEXT: s_cbranch_scc0 .LBB37_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:64 ; 
4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_alignbit_b32 v19, v10, v9, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 16 -; SI-NEXT: 
buffer_store_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 17 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 
14 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s9, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s43, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 52 +; SI-NEXT: 
s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 41 +; SI-NEXT: s_lshr_b32 s46, s21, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 40 +; SI-NEXT: s_lshr_b32 s46, s19, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 39 +; SI-NEXT: s_lshr_b32 s46, s19, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 38 +; SI-NEXT: s_lshr_b32 s46, s19, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 37 +; SI-NEXT: s_lshr_b32 s46, s17, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 36 +; SI-NEXT: s_lshr_b32 s46, s17, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 35 +; SI-NEXT: s_lshr_b32 s46, s17, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 34 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 32 +; SI-NEXT: v_writelane_b32 v61, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 30 +; SI-NEXT: v_writelane_b32 v61, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 28 +; SI-NEXT: v_writelane_b32 v61, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 26 +; SI-NEXT: v_writelane_b32 v61, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 24 +; SI-NEXT: v_writelane_b32 v61, s47, 25 +; 
SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 22 +; SI-NEXT: v_writelane_b32 v61, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 20 +; SI-NEXT: v_writelane_b32 v61, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 18 +; SI-NEXT: v_writelane_b32 v61, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 16 +; SI-NEXT: v_writelane_b32 v61, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 14 +; SI-NEXT: v_writelane_b32 v61, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 12 +; SI-NEXT: v_writelane_b32 v61, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 10 +; SI-NEXT: v_writelane_b32 v61, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 8 +; SI-NEXT: v_writelane_b32 v61, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 6 +; SI-NEXT: v_writelane_b32 v61, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 4 +; SI-NEXT: v_writelane_b32 v61, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 2 +; SI-NEXT: v_writelane_b32 v61, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 0 +; SI-NEXT: s_lshr_b64 s[48:49], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v61, s47, 1 +; SI-NEXT: s_lshr_b64 s[50:51], s[40:41], 24 +; SI-NEXT: s_lshr_b64 s[52:53], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[54:55], s[40:41], 8 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 
s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 +; SI-NEXT: s_cbranch_execnz .LBB37_4 +; SI-NEXT: .LBB37_2: ; %cmp.true +; SI-NEXT: v_add_f32_e64 v2, s5, 1.0 +; SI-NEXT: v_add_f32_e64 v1, s4, 1.0 +; SI-NEXT: v_lshr_b64 v[13:14], v[1:2], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[1:2], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[1:2], 8 +; SI-NEXT: v_add_f32_e64 v4, s7, 1.0 +; SI-NEXT: v_add_f32_e64 v3, s6, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[3:4], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 
offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[3:4], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[3:4], 8 +; SI-NEXT: v_add_f32_e64 v6, s9, 1.0 +; SI-NEXT: v_add_f32_e64 v5, s8, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[5:6], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[5:6], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[5:6], 8 +; SI-NEXT: v_add_f32_e64 v8, s11, 1.0 +; SI-NEXT: v_add_f32_e64 v7, s10, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[7:8], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[13:14], v[7:8], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded 
Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[7:8], 8 +; SI-NEXT: v_add_f32_e64 v10, s13, 1.0 +; SI-NEXT: v_add_f32_e64 v9, s12, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[9:10], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[9:10], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[9:10], 8 +; SI-NEXT: v_add_f32_e64 v12, s15, 1.0 +; SI-NEXT: v_add_f32_e64 v11, s14, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[11:12], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, 
off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[11:12], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[11:12], 8 +; SI-NEXT: v_add_f32_e64 v16, s41, 1.0 +; SI-NEXT: v_add_f32_e64 v15, s40, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[15:16], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[15:16], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[15:16], 8 +; SI-NEXT: v_add_f32_e64 v21, s43, 1.0 +; SI-NEXT: v_add_f32_e64 v20, s42, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:216 ; 4-byte 
Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[20:21], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[20:21], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[20:21], 8 +; SI-NEXT: v_add_f32_e64 v26, s45, 1.0 +; SI-NEXT: v_add_f32_e64 v25, s44, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[25:26], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[25:26], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[25:26], 8 +; SI-NEXT: v_add_f32_e64 v30, s29, 1.0 +; SI-NEXT: v_add_f32_e64 v29, s28, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[29:30], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[29:30], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[29:30], 8 +; SI-NEXT: v_add_f32_e64 v36, s27, 1.0 +; SI-NEXT: v_add_f32_e64 v35, s26, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v6 -; SI-NEXT: 
buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v2 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v2 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v2 -; SI-NEXT: v_alignbit_b32 v23, v53, v40, 24 -; SI-NEXT: v_alignbit_b32 v26, v53, v40, 16 -; SI-NEXT: v_alignbit_b32 v29, v53, v40, 8 -; SI-NEXT: v_alignbit_b32 v32, v42, v44, 24 -; SI-NEXT: v_alignbit_b32 v36, v42, v44, 16 -; SI-NEXT: v_alignbit_b32 v39, v42, v44, 8 -; SI-NEXT: v_alignbit_b32 v50, v47, v56, 24 -; SI-NEXT: v_alignbit_b32 v54, v47, v56, 16 -; SI-NEXT: v_alignbit_b32 v41, v47, v56, 8 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v25 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v25 -; SI-NEXT: v_lshrrev_b32_e32 v34, 24, v30 -; SI-NEXT: v_lshrrev_b32_e32 
v37, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v30 -; SI-NEXT: v_lshrrev_b32_e32 v52, 24, v35 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v35 -; SI-NEXT: v_lshrrev_b32_e32 v43, 8, v35 -; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v48 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v48 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v48 -; SI-NEXT: v_lshrrev_b32_e32 v58, 24, v53 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v53 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v53 -; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v42 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v42 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v42 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v47 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v47 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v47 -; SI-NEXT: s_cbranch_execnz .LBB37_3 -; SI-NEXT: .LBB37_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[35:36], 24 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 16 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[35:36], 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v18, v17, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[35:36], 8 +; SI-NEXT: v_add_f32_e64 v49, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v48, s24, 1.0 +; SI-NEXT: 
buffer_store_dword v13, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[17:18], v[48:49], 8 +; SI-NEXT: v_add_f32_e64 v53, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v52, s22, 1.0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 24 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 16 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v16, v15, 16 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[17:18], v[52:53], 8 +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v2 +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) 
-; SI-NEXT: v_alignbit_b32 v19, v16, v15, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v2 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v2 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 16 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v4 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v14, v13, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v4 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v4 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 16 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v6 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v12, v11, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:104 ; 
4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v6 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v6 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 16 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v8 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v10, v9, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v8 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v8 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 16 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v10 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v8, v7, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v10 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 
offset:320 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v10 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 16 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v12 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v12 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v12 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 16 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v16 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v4, v3, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v16 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 24 -; 
SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v16 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 16 -; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v21 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v21 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v21 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v26 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v26 +; SI-NEXT: v_add_f32_e64 v41, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v40, s20, 1.0 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v26 +; SI-NEXT: v_add_f32_e64 v58, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v57, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v46, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v45, s18, 1.0 +; SI-NEXT: v_lshr_b64 v[31:32], v[40:41], 16 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v30 +; SI-NEXT: v_lshr_b64 v[32:33], v[40:41], 8 +; SI-NEXT: v_lshr_b64 v[37:38], v[45:46], 16 +; SI-NEXT: v_lshr_b64 v[42:43], v[57:58], 16 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) 
+; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v30 +; SI-NEXT: v_lshr_b64 v[27:28], v[40:41], 24 +; SI-NEXT: v_lshr_b64 v[33:34], v[45:46], 24 +; SI-NEXT: v_lshr_b64 v[38:39], v[45:46], 8 +; SI-NEXT: v_lshr_b64 v[50:51], v[57:58], 24 +; SI-NEXT: v_lshr_b64 v[43:44], v[57:58], 8 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v30 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v55, 24, v36 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v36 +; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v49 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v49 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v53 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v17, 8, v53 +; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v41 +; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v41 +; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v41 +; SI-NEXT: v_lshrrev_b32_e32 v18, 24, v46 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v46 +; SI-NEXT: v_lshrrev_b32_e32 v23, 8, v46 +; SI-NEXT: v_lshrrev_b32_e32 v39, 24, v58 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v58 +; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v58 +; SI-NEXT: s_branch .LBB37_5 +; SI-NEXT: .LBB37_3: +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 0 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 1 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; 
implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 2 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 3 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 4 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 5 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 6 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 7 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 8 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; 
kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 9 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 10 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 11 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 12 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 13 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 14 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 15 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 16 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 17 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 18 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 19 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 20 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed 
$sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 21 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 22 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 23 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 24 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 25 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 26 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 27 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 28 +; SI-NEXT: v_writelane_b32 v61, s49, 29 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 30 +; SI-NEXT: v_writelane_b32 v61, s49, 31 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 32 +; SI-NEXT: v_writelane_b32 v61, s49, 33 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: s_branch .LBB37_2 +; SI-NEXT: .LBB37_4: +; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: 
v_readlane_b32 s4, v61, 34 +; SI-NEXT: v_mov_b32_e32 v54, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 35 +; SI-NEXT: v_mov_b32_e32 v51, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 36 +; SI-NEXT: v_mov_b32_e32 v39, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 37 +; SI-NEXT: v_mov_b32_e32 v23, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 38 +; SI-NEXT: v_mov_b32_e32 v22, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 39 +; SI-NEXT: v_mov_b32_e32 v18, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 40 +; SI-NEXT: v_mov_b32_e32 v34, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 41 +; SI-NEXT: v_mov_b32_e32 v28, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 42 +; SI-NEXT: v_mov_b32_e32 v24, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 43 +; SI-NEXT: v_mov_b32_e32 v17, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 44 +; SI-NEXT: v_mov_b32_e32 v19, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 45 +; SI-NEXT: v_mov_b32_e32 v14, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 46 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v60, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 47 +; SI-NEXT: v_mov_b32_e32 v59, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 48 +; SI-NEXT: v_mov_b32_e32 v56, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 49 +; SI-NEXT: v_mov_b32_e32 v47, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 50 +; SI-NEXT: v_mov_b32_e32 v44, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 51 +; SI-NEXT: v_mov_b32_e32 v55, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 52 +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 53 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v2, v1, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 54 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:168 ; 4-byte 
Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 55 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 16 -; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 -; SI-NEXT: v_add_f32_e32 v33, 1.0, v33 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 56 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v25, v28, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 57 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 58 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 16 -; SI-NEXT: v_add_f32_e32 v35, 1.0, v35 -; SI-NEXT: v_add_f32_e32 v38, 1.0, v38 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 59 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v30, v33, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 60 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, 
v35, v38, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 61 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 16 -; SI-NEXT: v_add_f32_e32 v48, 1.0, v48 -; SI-NEXT: v_add_f32_e32 v51, 1.0, v51 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 62 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v35, v38, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 63 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 24 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 0 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 1 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v48, v51, 8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 2 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 
v19, 24, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 3 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 5 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 6 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v16 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 7 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 8 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:200 ; 4-byte Folded 
Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 9 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v14 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 10 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 11 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 12 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v12 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 13 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 14 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 15 +; 
SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v10 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 16 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 17 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 32 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v8 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v6 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 
offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v4 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v2 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v2 -; SI-NEXT: v_add_f32_e32 v47, 1.0, v47 -; SI-NEXT: v_add_f32_e32 v56, 1.0, v56 -; SI-NEXT: v_add_f32_e32 v42, 1.0, v42 -; SI-NEXT: v_add_f32_e32 v44, 1.0, v44 -; SI-NEXT: v_add_f32_e32 v53, 1.0, v53 -; SI-NEXT: v_add_f32_e32 v40, 1.0, v40 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v2 -; SI-NEXT: v_alignbit_b32 v23, v53, v40, 24 -; SI-NEXT: v_alignbit_b32 v26, v53, v40, 16 -; SI-NEXT: v_alignbit_b32 v29, v53, v40, 8 -; SI-NEXT: v_alignbit_b32 v32, v42, v44, 24 -; SI-NEXT: v_alignbit_b32 v36, v42, v44, 16 -; SI-NEXT: v_alignbit_b32 v39, v42, v44, 8 -; SI-NEXT: v_alignbit_b32 v50, v47, v56, 24 -; SI-NEXT: v_alignbit_b32 v54, v47, v56, 16 -; SI-NEXT: v_alignbit_b32 v41, v47, v56, 8 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v18 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v25 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v25 -; SI-NEXT: v_lshrrev_b32_e32 v34, 24, v30 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v30 -; SI-NEXT: v_lshrrev_b32_e32 v52, 24, v35 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v35 -; SI-NEXT: v_lshrrev_b32_e32 v43, 8, v35 -; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v48 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v48 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v48 -; SI-NEXT: v_lshrrev_b32_e32 v58, 24, v53 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v53 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v53 -; 
SI-NEXT: v_lshrrev_b32_e32 v61, 24, v42 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v42 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v42 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v47 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v47 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v47 -; SI-NEXT: .LBB37_3: ; %end -; SI-NEXT: v_and_b32_e32 v56, 0xff, v56 -; SI-NEXT: v_lshlrev_b32_e32 v41, 8, v41 -; SI-NEXT: v_and_b32_e32 v54, 0xff, v54 -; SI-NEXT: v_or_b32_e32 v41, v56, v41 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v54 +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s48 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 33 +; SI-NEXT: v_readlane_b32 s4, v61, 30 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 31 +; SI-NEXT: v_readlane_b32 s4, v61, 28 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 29 +; SI-NEXT: v_readlane_b32 s4, v61, 26 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 27 +; SI-NEXT: v_readlane_b32 s4, v61, 24 +; SI-NEXT: 
s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 25 +; SI-NEXT: v_readlane_b32 s4, v61, 22 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 23 +; SI-NEXT: v_readlane_b32 s4, v61, 20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 21 +; SI-NEXT: v_readlane_b32 s4, v61, 18 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 19 +; SI-NEXT: v_readlane_b32 s4, v61, 16 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 17 +; SI-NEXT: v_readlane_b32 s4, v61, 14 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 15 +; SI-NEXT: v_readlane_b32 s4, v61, 12 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], 
s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 13 +; SI-NEXT: v_readlane_b32 s4, v61, 10 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 11 +; SI-NEXT: v_readlane_b32 s4, v61, 8 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 9 +; SI-NEXT: v_readlane_b32 s4, v61, 6 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 7 +; SI-NEXT: v_readlane_b32 s4, v61, 4 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 5 +; SI-NEXT: v_readlane_b32 s4, v61, 2 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 3 +; SI-NEXT: v_readlane_b32 s4, v61, 0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:196 ; 4-byte 
Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s50 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s52 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s54 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s64 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s66 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s68 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s70 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s80 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s82 +; 
SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s84 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s86 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s96 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s98 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s46 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s56 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v13, s58 +; SI-NEXT: v_mov_b32_e32 v27, s62 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, 
off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v27, s72 +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v27, s74 +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v27, s76 +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v57, s16 +; SI-NEXT: v_mov_b32_e32 v58, s17 +; SI-NEXT: v_mov_b32_e32 v45, s18 +; SI-NEXT: v_mov_b32_e32 v46, s19 +; SI-NEXT: v_mov_b32_e32 v40, s20 +; SI-NEXT: v_mov_b32_e32 v41, s21 +; SI-NEXT: v_mov_b32_e32 v52, s22 +; SI-NEXT: v_mov_b32_e32 v53, s23 +; SI-NEXT: v_mov_b32_e32 v48, s24 +; SI-NEXT: v_mov_b32_e32 v49, s25 +; SI-NEXT: v_mov_b32_e32 v35, s26 +; SI-NEXT: v_mov_b32_e32 v36, s27 +; SI-NEXT: v_mov_b32_e32 v29, s28 +; SI-NEXT: v_mov_b32_e32 v30, s29 +; SI-NEXT: v_mov_b32_e32 v25, s44 +; SI-NEXT: v_mov_b32_e32 v26, s45 +; SI-NEXT: v_mov_b32_e32 v20, s42 +; SI-NEXT: v_mov_b32_e32 v21, s43 +; SI-NEXT: v_mov_b32_e32 v15, s40 +; SI-NEXT: v_mov_b32_e32 v16, s41 +; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v12, s15 +; SI-NEXT: v_mov_b32_e32 v9, s12 +; SI-NEXT: v_mov_b32_e32 v10, s13 +; SI-NEXT: v_mov_b32_e32 v7, s10 +; SI-NEXT: v_mov_b32_e32 v8, s11 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: v_mov_b32_e32 v6, s9 +; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v4, s7 +; SI-NEXT: v_readlane_b32 s5, v61, 1 +; SI-NEXT: v_mov_b32_e32 v13, s60 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v27, s78 +; SI-NEXT: v_mov_b32_e32 v31, s88 +; SI-NEXT: 
v_mov_b32_e32 v32, s90 +; SI-NEXT: v_mov_b32_e32 v33, s92 +; SI-NEXT: v_mov_b32_e32 v37, s94 +; SI-NEXT: v_mov_b32_e32 v38, s30 +; SI-NEXT: v_mov_b32_e32 v50, s34 +; SI-NEXT: v_mov_b32_e32 v42, s36 +; SI-NEXT: v_mov_b32_e32 v43, s38 +; SI-NEXT: .LBB37_5: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v43, 8, v43 +; SI-NEXT: v_and_b32_e32 v57, 0xff, v57 +; SI-NEXT: v_and_b32_e32 v42, 0xff, v42 +; SI-NEXT: v_or_b32_e32 v43, v57, v43 ; SI-NEXT: v_lshlrev_b32_e32 v50, 24, v50 -; SI-NEXT: v_or_b32_e32 v50, v50, v54 -; SI-NEXT: v_and_b32_e32 v54, 0xffff, v41 -; SI-NEXT: v_or_b32_e32 v50, v54, v50 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v42 +; SI-NEXT: v_or_b32_e32 v50, v50, v42 +; SI-NEXT: v_and_b32_e32 v42, 0xffff, v43 +; SI-NEXT: v_or_b32_e32 v50, v42, v50 ; SI-NEXT: buffer_store_dword v50, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v50, 0xff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_or_b32_e32 v21, v50, v21 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v50, 0xff, v58 +; SI-NEXT: v_lshlrev_b32_e32 v54, 8, v54 +; SI-NEXT: v_and_b32_e32 v51, 0xff, v51 +; SI-NEXT: v_or_b32_e32 v50, v50, v54 +; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v39, 24, v39 +; SI-NEXT: v_or_b32_e32 v39, v39, v51 +; SI-NEXT: v_and_b32_e32 v50, 0xffff, v50 +; SI-NEXT: v_or_b32_e32 v39, v50, v39 +; SI-NEXT: v_add_i32_e32 v50, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v39, v50, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v38 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v39, 0xff, v45 +; SI-NEXT: v_and_b32_e32 v37, 0xff, v37 +; SI-NEXT: v_or_b32_e32 v38, v39, v38 +; SI-NEXT: 
v_lshlrev_b32_e32 v33, 24, v33 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 +; SI-NEXT: v_or_b32_e32 v33, v33, v37 +; SI-NEXT: v_and_b32_e32 v37, 0xffff, v38 +; SI-NEXT: v_or_b32_e32 v33, v37, v33 +; SI-NEXT: v_add_i32_e32 v37, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v33, v37, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v44 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v39 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v32 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v33, 0xff, v46 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 +; SI-NEXT: v_or_b32_e32 v23, v33, v23 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v18 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 +; SI-NEXT: v_and_b32_e32 v22, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v18, v22, v18 +; SI-NEXT: v_add_i32_e32 v22, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v18, v22, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v32 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v40 +; SI-NEXT: v_or_b32_e32 v18, v22, v18 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v31 +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v27 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_or_b32_e32 v22, v23, v22 +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v18, v22, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v18, 0xff, v41 +; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v34 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, 
v22 +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v24 +; SI-NEXT: v_or_b32_e32 v22, v23, v22 +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v18, v22, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v17 +; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_readlane_b32 s99, v63, 35 +; SI-NEXT: v_readlane_b32 s98, v63, 34 +; SI-NEXT: v_readlane_b32 s97, v63, 33 +; SI-NEXT: v_readlane_b32 s96, v63, 32 +; SI-NEXT: v_readlane_b32 s87, v63, 31 +; SI-NEXT: v_readlane_b32 s86, v63, 30 +; SI-NEXT: v_readlane_b32 s85, v63, 29 +; SI-NEXT: v_readlane_b32 s84, v63, 28 +; SI-NEXT: v_readlane_b32 s83, v63, 27 +; SI-NEXT: v_readlane_b32 s82, v63, 26 +; SI-NEXT: v_readlane_b32 s81, v63, 25 +; SI-NEXT: v_readlane_b32 s80, v63, 24 +; SI-NEXT: v_readlane_b32 s71, v63, 23 +; SI-NEXT: v_readlane_b32 s70, v63, 22 +; SI-NEXT: v_readlane_b32 s69, v63, 21 +; SI-NEXT: v_readlane_b32 s68, v63, 20 +; SI-NEXT: v_readlane_b32 s67, v63, 19 +; SI-NEXT: v_readlane_b32 s66, v63, 18 +; SI-NEXT: v_readlane_b32 s65, v63, 17 +; SI-NEXT: v_readlane_b32 s64, v63, 16 +; SI-NEXT: v_readlane_b32 s55, v63, 15 +; SI-NEXT: v_readlane_b32 s54, v63, 14 +; SI-NEXT: v_readlane_b32 s53, v63, 13 +; SI-NEXT: v_readlane_b32 s52, v63, 12 +; SI-NEXT: v_readlane_b32 s51, v63, 11 +; SI-NEXT: v_readlane_b32 s50, v63, 10 +; SI-NEXT: v_readlane_b32 s49, v63, 9 +; SI-NEXT: v_readlane_b32 s48, v63, 
8 +; SI-NEXT: v_readlane_b32 s39, v63, 7 +; SI-NEXT: v_readlane_b32 s38, v63, 6 +; SI-NEXT: v_readlane_b32 s37, v63, 5 +; SI-NEXT: v_readlane_b32 s36, v63, 4 +; SI-NEXT: v_readlane_b32 s35, v63, 3 +; SI-NEXT: v_readlane_b32 s34, v63, 2 +; SI-NEXT: v_readlane_b32 s31, v63, 1 +; SI-NEXT: v_readlane_b32 s30, v63, 0 +; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v22 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v52 +; SI-NEXT: v_or_b32_e32 v18, v22, v18 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_or_b32_e32 v22, v23, v22 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v18, v22, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v42 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v63 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v62 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v61 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v18, 0xff, v53 +; SI-NEXT: v_or_b32_e32 v17, v18, v17 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_or_b32_e32 v14, v14, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: 
v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_add_i32_e32 v17, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v14, v17, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v17 +; SI-NEXT: v_and_b32_e32 v17, 0xff, v48 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_or_b32_e32 v13, v17, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: v_add_i32_e32 v14, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v40 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v29 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v23 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v13, 0xff, v49 +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v60 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v59 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v56 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v53 -; SI-NEXT: 
v_lshlrev_b32_e32 v20, 8, v60 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v59 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v58 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v35 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v51 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v36 +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v47 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v44 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: 
v_lshlrev_b32_e32 v17, 24, v55 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v29 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v13, 0xff, v30 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: 
buffer_load_dword v14, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v57 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v46 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v45 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v25 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: 
buffer_load_dword v18, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v38 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v26 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v35 -; SI-NEXT: 
v_lshlrev_b32_e32 v20, 8, v43 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v55 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v52 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v33 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v21 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: 
v_lshlrev_b32_e32 v20, 8, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v17, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v49 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v34 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; 
SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v15, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v28 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v16 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v15 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: 
v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v15, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v31 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v24 -; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 +; SI-NEXT: v_add_i32_e32 v13, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; 
SI-NEXT: v_and_b32_e32 v11, 0xff, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v19 -; SI-NEXT: v_or_b32_e32 v1, v1, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 +; SI-NEXT: v_or_b32_e32 v11, v11, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v20 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_or_b32_e32 v1, v1, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_or_b32_e32 v12, v13, v12 +; SI-NEXT: v_or_b32_e32 v11, v11, v12 +; SI-NEXT: v_add_i32_e32 v12, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 +; SI-NEXT: v_or_b32_e32 v9, 
v9, v11 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v19, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v12 +; SI-NEXT: v_or_b32_e32 v11, v12, v11 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 +; SI-NEXT: v_add_i32_e32 v11, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xff, v10 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; SI-NEXT: v_or_b32_e32 v9, v9, v10 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_or_b32_e32 v10, v11, v10 +; SI-NEXT: v_or_b32_e32 v9, v9, v10 +; SI-NEXT: v_add_i32_e32 v10, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: 
v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v7, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v5 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v8 +; SI-NEXT: v_or_b32_e32 v7, v7, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_or_b32_e32 v8, v9, v8 +; SI-NEXT: v_or_b32_e32 v7, v7, v8 +; SI-NEXT: v_add_i32_e32 v8, 
vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v6 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; SI-NEXT: v_or_b32_e32 v7, v8, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x68, v0 +; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v7 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload 
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v8 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:96 ; 4-byte 
Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x70, v0 +; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v9 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v4 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; SI-NEXT: 
v_and_b32_e32 v3, 0xffff, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 0x74, v0 +; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v10 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, 
off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 @@ -44157,331 +45365,27 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a, ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v14 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: 
v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v15 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_and_b32_e32 v1, 0xff, v17 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v18 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; 
SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: 
buffer_load_dword v62, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB37_4: -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; 
SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; 
implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; kill: killed $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: s_branch .LBB37_2 ; ; VI-LABEL: 
bitcast_v32f32_to_v128i8_scalar: ; VI: ; %bb.0: @@ -71182,21 +72086,21 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_mov_b32_e32 v36, s16 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v35, s17 -; SI-NEXT: v_mov_b32_e32 v33, s18 -; SI-NEXT: v_mov_b32_e32 v32, s19 -; SI-NEXT: v_mov_b32_e32 v31, s20 -; SI-NEXT: v_mov_b32_e32 v29, s21 -; SI-NEXT: v_mov_b32_e32 v28, s22 +; SI-NEXT: v_mov_b32_e32 v31, s16 +; SI-NEXT: v_mov_b32_e32 v32, s17 +; SI-NEXT: v_mov_b32_e32 v29, s18 +; SI-NEXT: v_mov_b32_e32 v30, s19 +; SI-NEXT: v_mov_b32_e32 v27, s20 +; SI-NEXT: v_mov_b32_e32 v28, s21 +; SI-NEXT: v_mov_b32_e32 v25, s22 ; SI-NEXT: v_mov_b32_e32 v26, s23 -; SI-NEXT: v_mov_b32_e32 v25, s24 +; SI-NEXT: v_mov_b32_e32 v23, s24 ; SI-NEXT: v_mov_b32_e32 v24, s25 -; SI-NEXT: v_mov_b32_e32 v22, s26 -; SI-NEXT: v_mov_b32_e32 v21, s27 -; SI-NEXT: v_mov_b32_e32 v20, s28 -; SI-NEXT: v_mov_b32_e32 v19, s29 +; SI-NEXT: v_mov_b32_e32 v21, s26 +; SI-NEXT: v_mov_b32_e32 v22, s27 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v19, s28 +; SI-NEXT: v_mov_b32_e32 v20, s29 ; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill @@ -71215,234 +72119,242 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a, ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v23, v18, v17, 16 -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v27, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v30, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 
v34, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v38, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v48, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v50, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v52, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v41, v21, v22, 16 -; SI-NEXT: v_alignbit_b32 v43, v24, v25, 16 -; SI-NEXT: v_alignbit_b32 v45, v26, v28, 16 -; SI-NEXT: v_alignbit_b32 v47, v29, v31, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v32, v33, 16 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v60, v35, v36, 16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v29 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v32 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v35 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[5:6], 16 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[36:37], v[15:16], 16 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v18 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt 
expcnt(0) +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[21:22], 16 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[36:37], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[7:8], 16 +; SI-NEXT: v_mov_b32_e32 v53, v40 +; SI-NEXT: v_lshr_b64 v[39:40], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[40:41], v[25:26], 16 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: v_lshr_b64 v[54:55], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[41:42], v[27:28], 16 +; SI-NEXT: v_mov_b32_e32 v55, v48 +; SI-NEXT: v_lshr_b64 v[48:49], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[42:43], v[29:30], 16 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v32 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[43:44], v[31:32], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true ; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 ; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v35, 1.0, v35 -; SI-NEXT: v_add_f32_e32 v36, 1.0, v36 -; SI-NEXT: v_add_f32_e32 v32, 1.0, v32 -; SI-NEXT: v_add_f32_e32 v33, 1.0, v33 -; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 -; SI-NEXT: v_add_f32_e32 v31, 1.0, v31 -; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: 
v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_lshr_b64 v[33:34], v[17:18], 16 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_lshr_b64 v[36:37], v[11:12], 16 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[37:38], v[9:10], 16 ; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 +; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[7:8], 16 +; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 ; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_lshr_b64 v[39:40], v[23:24], 16 +; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[40:41], v[25:26], 16 +; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 +; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 ; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 ; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 ; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v11, 
1.0, v11 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_alignbit_b32 v23, v18, v17, 16 -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v27, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v30, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v34, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v38, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v48, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v50, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v52, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v41, v21, v22, 16 -; SI-NEXT: v_alignbit_b32 v43, v24, v25, 16 -; SI-NEXT: v_alignbit_b32 v45, v26, v28, 16 -; SI-NEXT: v_alignbit_b32 v47, v29, v31, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v32, v33, 16 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v60, v35, v36, 16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v29 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[33:34], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[41:42], v[27:28], 16 +; SI-NEXT: v_add_f32_e32 v32, 1.0, v32 +; SI-NEXT: v_add_f32_e32 v31, 1.0, v31 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: 
v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[51:52], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[42:43], v[29:30], 16 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[54:55], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[43:44], v[31:32], 16 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v22 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v35 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v32 ; SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: v_and_b32_e32 v36, 0xffff, v36 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v60 -; SI-NEXT: v_and_b32_e32 v35, 0xffff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_or_b32_e32 v36, v36, v60 -; SI-NEXT: v_or_b32_e32 v23, v35, v23 -; SI-NEXT: v_add_i32_e32 v35, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v36, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v23, v35, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, 
v33 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v58 -; SI-NEXT: v_or_b32_e32 v23, v23, v33 -; SI-NEXT: v_add_i32_e32 v33, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v23, v33, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v63 -; SI-NEXT: v_or_b32_e32 v23, v23, v32 -; SI-NEXT: v_add_i32_e32 v32, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v23, v32, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v47 -; SI-NEXT: v_or_b32_e32 v23, v23, v31 -; SI-NEXT: v_add_i32_e32 v31, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v23, v31, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v62 -; SI-NEXT: v_or_b32_e32 v23, v23, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v23, v29, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v28 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v43 +; SI-NEXT: v_and_b32_e32 v31, 0xffff, v31 +; SI-NEXT: v_or_b32_e32 v31, v31, v50 +; SI-NEXT: buffer_store_dword v31, v0, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v31, 0xffff, v32 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v35 +; SI-NEXT: v_or_b32_e32 v31, v31, v32 +; SI-NEXT: v_add_i32_e32 v32, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v31, v32, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v42 +; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 +; SI-NEXT: v_or_b32_e32 v29, v29, v31 +; SI-NEXT: v_add_i32_e32 v31, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v29, v31, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v29, 0xffff, v30 +; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v46 +; SI-NEXT: v_or_b32_e32 v29, v29, v30 +; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v29, v30, 
s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v41 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 +; SI-NEXT: v_or_b32_e32 v27, v27, v29 +; SI-NEXT: v_add_i32_e32 v29, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v27, v29, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 ; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v45 -; SI-NEXT: v_or_b32_e32 v23, v23, v28 -; SI-NEXT: v_add_i32_e32 v28, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v23, v28, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_add_i32_e32 v28, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v40 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v25, v25, v27 +; SI-NEXT: v_add_i32_e32 v27, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v25, v27, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v61 -; SI-NEXT: v_or_b32_e32 v23, v23, v26 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v34 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 ; SI-NEXT: v_add_i32_e32 v26, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v23, v26, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v39 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_add_i32_e32 v25, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v59 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v33 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen -; 
SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v41 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 +; SI-NEXT: v_or_b32_e32 v21, v21, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v57 +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v63 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v54 -; SI-NEXT: v_or_b32_e32 v20, v20, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v49 +; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v20, v21, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v56 +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v62 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48 ; SI-NEXT: v_or_b32_e32 v1, v1, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v46 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v61 ; 
SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v60 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v59 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -71454,7 +72366,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v58 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -71466,57 +72378,64 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 +; 
SI-NEXT: v_lshlrev_b32_e32 v2, 16, v57 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v56 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v47 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 
0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v17 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen @@ -71539,39 +72458,44 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: +; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; 
implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr58 ; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr47 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; kill: killed $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v32f32_to_v64i16_scalar: @@ -79027,884 +79951,1220 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3 ; SI-LABEL: 
bitcast_v16i64_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[4:5] -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v41, s30, 0 -; SI-NEXT: v_writelane_b32 v41, s31, 1 -; SI-NEXT: v_writelane_b32 v41, s34, 2 -; SI-NEXT: v_writelane_b32 v41, s35, 3 -; SI-NEXT: v_writelane_b32 v41, s36, 4 -; SI-NEXT: v_writelane_b32 v41, s37, 5 -; SI-NEXT: v_writelane_b32 v41, s38, 6 -; SI-NEXT: v_writelane_b32 v41, s39, 7 -; SI-NEXT: v_writelane_b32 v41, s48, 8 -; SI-NEXT: v_writelane_b32 v41, s49, 9 -; SI-NEXT: v_writelane_b32 v41, s50, 10 -; SI-NEXT: v_writelane_b32 v41, s51, 11 -; SI-NEXT: v_writelane_b32 v41, s52, 12 -; SI-NEXT: v_writelane_b32 v41, s53, 13 -; SI-NEXT: v_writelane_b32 v41, s54, 14 -; SI-NEXT: v_writelane_b32 v41, s55, 15 -; SI-NEXT: v_writelane_b32 v41, s64, 16 -; SI-NEXT: v_writelane_b32 v41, s65, 17 -; SI-NEXT: v_writelane_b32 v41, s66, 18 -; SI-NEXT: v_writelane_b32 v41, s67, 19 -; SI-NEXT: v_writelane_b32 v41, s68, 20 -; SI-NEXT: v_writelane_b32 v41, s69, 21 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_writelane_b32 v20, s30, 0 +; SI-NEXT: v_writelane_b32 v20, s31, 1 +; SI-NEXT: v_writelane_b32 v20, s34, 2 +; SI-NEXT: v_writelane_b32 v20, s35, 3 +; SI-NEXT: v_writelane_b32 v20, s36, 4 +; SI-NEXT: v_writelane_b32 v20, s37, 5 +; SI-NEXT: v_writelane_b32 v20, s38, 6 +; SI-NEXT: v_writelane_b32 v20, s39, 7 +; SI-NEXT: v_writelane_b32 v20, s48, 8 +; SI-NEXT: v_writelane_b32 v20, s49, 9 +; SI-NEXT: v_writelane_b32 v20, s50, 10 +; SI-NEXT: v_writelane_b32 v20, s51, 11 +; SI-NEXT: 
v_writelane_b32 v20, s52, 12 +; SI-NEXT: v_writelane_b32 v20, s53, 13 +; SI-NEXT: v_writelane_b32 v20, s54, 14 +; SI-NEXT: v_writelane_b32 v20, s55, 15 +; SI-NEXT: v_writelane_b32 v20, s64, 16 +; SI-NEXT: v_writelane_b32 v20, s65, 17 +; SI-NEXT: v_writelane_b32 v20, s66, 18 +; SI-NEXT: v_writelane_b32 v20, s67, 19 +; SI-NEXT: v_writelane_b32 v20, s68, 20 +; SI-NEXT: v_writelane_b32 v20, s69, 21 +; SI-NEXT: v_writelane_b32 v20, s70, 22 +; SI-NEXT: v_writelane_b32 v20, s71, 23 +; SI-NEXT: v_writelane_b32 v20, s80, 24 +; SI-NEXT: v_writelane_b32 v20, s81, 25 +; SI-NEXT: v_writelane_b32 v20, s82, 26 +; SI-NEXT: v_writelane_b32 v20, s83, 27 +; SI-NEXT: v_writelane_b32 v20, s84, 28 +; SI-NEXT: v_writelane_b32 v20, s85, 29 +; SI-NEXT: v_writelane_b32 v20, s86, 30 +; SI-NEXT: v_writelane_b32 v20, s87, 31 +; SI-NEXT: v_writelane_b32 v20, s96, 32 +; SI-NEXT: v_writelane_b32 v20, s97, 33 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_writelane_b32 v41, s70, 22 -; SI-NEXT: v_readfirstlane_b32 s47, v1 -; SI-NEXT: v_readfirstlane_b32 s46, v2 -; SI-NEXT: v_readfirstlane_b32 s45, v3 -; SI-NEXT: v_readfirstlane_b32 s44, v4 -; SI-NEXT: v_readfirstlane_b32 s43, v5 -; SI-NEXT: v_readfirstlane_b32 s42, v6 -; SI-NEXT: v_readfirstlane_b32 s41, v7 -; SI-NEXT: v_readfirstlane_b32 s40, v8 -; SI-NEXT: v_readfirstlane_b32 s15, v9 -; SI-NEXT: v_readfirstlane_b32 s14, v10 -; SI-NEXT: v_readfirstlane_b32 s13, v11 -; SI-NEXT: v_readfirstlane_b32 s12, v12 -; SI-NEXT: v_readfirstlane_b32 s11, v13 -; SI-NEXT: v_readfirstlane_b32 s10, v14 -; SI-NEXT: v_readfirstlane_b32 s9, v15 -; SI-NEXT: v_readfirstlane_b32 s8, v16 -; SI-NEXT: v_readfirstlane_b32 s7, v17 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v18 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: v_writelane_b32 v41, s71, 23 +; SI-NEXT: v_writelane_b32 v20, s98, 34 +; SI-NEXT: v_readfirstlane_b32 s44, v1 +; SI-NEXT: v_readfirstlane_b32 s45, v2 +; SI-NEXT: 
v_readfirstlane_b32 s42, v3 +; SI-NEXT: v_readfirstlane_b32 s43, v4 +; SI-NEXT: v_readfirstlane_b32 s40, v5 +; SI-NEXT: v_readfirstlane_b32 s41, v6 +; SI-NEXT: v_readfirstlane_b32 s14, v7 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s10, v11 +; SI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: v_readfirstlane_b32 s8, v13 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: v_readfirstlane_b32 s6, v15 +; SI-NEXT: v_readfirstlane_b32 s7, v16 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: v_writelane_b32 v20, s99, 35 +; SI-NEXT: ; implicit-def: $vgpr22 : SGPR spill to VGPR lane +; SI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane ; SI-NEXT: s_cbranch_scc0 .LBB57_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v22, s45 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s9 -; SI-NEXT: v_mov_b32_e32 v9, s11 -; SI-NEXT: v_mov_b32_e32 v12, s13 -; SI-NEXT: v_mov_b32_e32 v15, s15 -; SI-NEXT: v_mov_b32_e32 v18, s41 -; SI-NEXT: v_mov_b32_e32 v21, s43 -; SI-NEXT: v_alignbit_b32 v24, s44, v22, 24 -; SI-NEXT: v_alignbit_b32 v25, s44, v22, 16 -; SI-NEXT: v_alignbit_b32 v26, s44, v22, 8 -; SI-NEXT: v_mov_b32_e32 v22, s47 -; SI-NEXT: v_mov_b32_e32 v23, s28 -; SI-NEXT: v_mov_b32_e32 v29, s26 -; SI-NEXT: v_mov_b32_e32 v35, s24 -; SI-NEXT: v_mov_b32_e32 v39, s22 -; SI-NEXT: v_mov_b32_e32 v50, s20 -; SI-NEXT: v_mov_b32_e32 v53, s18 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s8, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s8, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s8, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s10, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s10, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s10, 
v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s12, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s12, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s12, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s14, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s14, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s14, v15, 8 -; SI-NEXT: v_alignbit_b32 v16, s40, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s40, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s40, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8 -; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24 -; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16 -; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8 -; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24 -; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16 -; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8 -; SI-NEXT: v_alignbit_b32 v27, s27, v29, 24 -; SI-NEXT: v_alignbit_b32 v28, s27, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, s27, v29, 8 -; SI-NEXT: v_alignbit_b32 v33, s25, v35, 24 -; SI-NEXT: v_alignbit_b32 v34, s25, v35, 16 -; SI-NEXT: v_alignbit_b32 v35, s25, v35, 8 -; SI-NEXT: v_alignbit_b32 v37, s23, v39, 24 -; SI-NEXT: v_alignbit_b32 v38, s23, v39, 16 -; SI-NEXT: v_alignbit_b32 v39, s23, v39, 8 -; SI-NEXT: v_alignbit_b32 v48, s21, v50, 24 -; SI-NEXT: v_alignbit_b32 v49, s21, v50, 16 -; SI-NEXT: v_alignbit_b32 v50, s21, v50, 8 -; SI-NEXT: v_alignbit_b32 v51, s19, v53, 24 -; SI-NEXT: v_alignbit_b32 v52, s19, v53, 16 -; SI-NEXT: v_alignbit_b32 v53, s19, v53, 8 -; SI-NEXT: v_alignbit_b32 v54, s17, v40, 24 -; SI-NEXT: v_alignbit_b32 v55, s17, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, s17, v40, 8 -; SI-NEXT: s_lshr_b32 s56, s6, 24 -; SI-NEXT: s_lshr_b32 s57, s6, 16 -; SI-NEXT: s_lshr_b32 s58, s6, 8 -; SI-NEXT: s_lshr_b32 s59, s8, 24 -; SI-NEXT: s_lshr_b32 s60, s8, 16 -; SI-NEXT: s_lshr_b32 s61, s8, 8 -; SI-NEXT: s_lshr_b32 s62, s10, 24 -; SI-NEXT: s_lshr_b32 s63, s10, 16 -; SI-NEXT: s_lshr_b32 s72, s10, 8 -; SI-NEXT: s_lshr_b32 s73, s12, 24 -; SI-NEXT: s_lshr_b32 s74, s12, 
16 -; SI-NEXT: s_lshr_b32 s75, s12, 8 -; SI-NEXT: s_lshr_b32 s76, s14, 24 -; SI-NEXT: s_lshr_b32 s77, s14, 16 -; SI-NEXT: s_lshr_b32 s78, s14, 8 -; SI-NEXT: s_lshr_b32 s79, s40, 24 -; SI-NEXT: s_lshr_b32 s88, s40, 16 -; SI-NEXT: s_lshr_b32 s89, s40, 8 -; SI-NEXT: s_lshr_b32 s90, s42, 24 -; SI-NEXT: s_lshr_b32 s91, s42, 16 -; SI-NEXT: s_lshr_b32 s92, s42, 8 -; SI-NEXT: s_lshr_b32 s93, s44, 24 -; SI-NEXT: s_lshr_b32 s94, s44, 16 -; SI-NEXT: s_lshr_b32 s95, s44, 8 -; SI-NEXT: s_lshr_b32 s30, s46, 24 -; SI-NEXT: s_lshr_b32 s31, s46, 16 -; SI-NEXT: s_lshr_b32 s34, s46, 8 -; SI-NEXT: s_lshr_b32 s35, s29, 24 -; SI-NEXT: s_lshr_b32 s36, s29, 16 -; SI-NEXT: s_lshr_b32 s37, s29, 8 -; SI-NEXT: s_lshr_b32 s38, s27, 24 -; SI-NEXT: s_lshr_b32 s39, s27, 16 -; SI-NEXT: s_lshr_b32 s48, s27, 8 -; SI-NEXT: s_lshr_b32 s49, s25, 24 -; SI-NEXT: s_lshr_b32 s50, s25, 16 -; SI-NEXT: s_lshr_b32 s51, s25, 8 -; SI-NEXT: s_lshr_b32 s52, s23, 24 -; SI-NEXT: s_lshr_b32 s53, s23, 16 -; SI-NEXT: s_lshr_b32 s54, s23, 8 -; SI-NEXT: s_lshr_b32 s55, s21, 24 -; SI-NEXT: s_lshr_b32 s64, s21, 16 -; SI-NEXT: s_lshr_b32 s65, s21, 8 -; SI-NEXT: s_lshr_b32 s66, s19, 24 -; SI-NEXT: s_lshr_b32 s67, s19, 16 -; SI-NEXT: s_lshr_b32 s68, s19, 8 -; SI-NEXT: s_lshr_b32 s69, s17, 24 -; SI-NEXT: s_lshr_b32 s70, s17, 16 -; SI-NEXT: s_lshr_b32 s71, s17, 8 +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v22, s46, 40 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 41 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s9, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; 
SI-NEXT: v_writelane_b32 v22, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s43, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: 
v_writelane_b32 v21, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 38 +; SI-NEXT: v_writelane_b32 v22, s47, 39 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 36 +; SI-NEXT: v_writelane_b32 v22, s47, 37 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 34 +; SI-NEXT: v_writelane_b32 v22, s47, 35 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 32 +; SI-NEXT: v_writelane_b32 v22, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 30 +; SI-NEXT: v_writelane_b32 v22, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 28 +; SI-NEXT: v_writelane_b32 v22, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 26 +; SI-NEXT: v_writelane_b32 v22, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 24 +; SI-NEXT: v_writelane_b32 v22, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 22 +; SI-NEXT: v_writelane_b32 v22, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 20 +; SI-NEXT: v_writelane_b32 v22, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 18 +; SI-NEXT: v_writelane_b32 v22, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 
+; SI-NEXT: v_writelane_b32 v22, s46, 16 +; SI-NEXT: v_writelane_b32 v22, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 14 +; SI-NEXT: v_writelane_b32 v22, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 12 +; SI-NEXT: v_writelane_b32 v22, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 10 +; SI-NEXT: v_writelane_b32 v22, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 6 +; SI-NEXT: v_writelane_b32 v22, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 4 +; SI-NEXT: v_writelane_b32 v22, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 2 +; SI-NEXT: v_writelane_b32 v22, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 0 +; SI-NEXT: s_lshr_b32 s49, s19, 24 +; SI-NEXT: s_lshr_b32 s48, s19, 16 +; SI-NEXT: s_lshr_b32 s50, s19, 8 +; SI-NEXT: s_lshr_b32 s51, s17, 24 +; SI-NEXT: s_lshr_b32 s52, s17, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 8 +; SI-NEXT: s_lshr_b64 s[54:55], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v22, s47, 1 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; 
SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB57_3 ; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 +; SI-NEXT: s_add_u32 s14, s14, 3 +; SI-NEXT: s_addc_u32 s15, s15, 0 +; SI-NEXT: s_add_u32 s40, s40, 3 +; SI-NEXT: s_addc_u32 s41, s41, 0 +; SI-NEXT: s_add_u32 s42, s42, 3 +; SI-NEXT: s_addc_u32 s43, s43, 0 +; SI-NEXT: s_add_u32 s44, s44, 3 +; SI-NEXT: s_addc_u32 s45, s45, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s47, s47, 3 -; SI-NEXT: s_addc_u32 s46, s46, 0 -; SI-NEXT: s_add_u32 s45, s45, 3 -; SI-NEXT: s_addc_u32 s44, s44, 0 -; SI-NEXT: s_add_u32 s43, s43, 3 -; SI-NEXT: s_addc_u32 s42, s42, 0 -; SI-NEXT: s_add_u32 s41, s41, 3 -; 
SI-NEXT: s_addc_u32 s40, s40, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: v_mov_b32_e32 v22, s45 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s9 -; SI-NEXT: v_mov_b32_e32 v9, s11 -; SI-NEXT: v_mov_b32_e32 v12, s13 -; SI-NEXT: v_mov_b32_e32 v15, s15 -; SI-NEXT: v_mov_b32_e32 v18, s41 -; SI-NEXT: v_mov_b32_e32 v21, s43 -; SI-NEXT: v_alignbit_b32 v24, s44, v22, 24 -; SI-NEXT: v_alignbit_b32 v25, s44, v22, 16 -; SI-NEXT: v_alignbit_b32 v26, s44, v22, 8 -; SI-NEXT: v_mov_b32_e32 v22, s47 -; SI-NEXT: v_mov_b32_e32 v23, s28 -; SI-NEXT: v_mov_b32_e32 v29, s26 -; SI-NEXT: v_mov_b32_e32 v35, s24 -; SI-NEXT: v_mov_b32_e32 v39, s22 -; SI-NEXT: v_mov_b32_e32 v50, s20 -; SI-NEXT: v_mov_b32_e32 v53, s18 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v40, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s8, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s8, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s8, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s10, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s10, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s10, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s12, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s12, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s12, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s14, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s14, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s14, v15, 8 -; SI-NEXT: v_alignbit_b32 v16, s40, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s40, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s40, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s42, 
v21, 8 -; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24 -; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16 -; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8 -; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24 -; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16 -; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8 -; SI-NEXT: v_alignbit_b32 v27, s27, v29, 24 -; SI-NEXT: v_alignbit_b32 v28, s27, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, s27, v29, 8 -; SI-NEXT: v_alignbit_b32 v33, s25, v35, 24 -; SI-NEXT: v_alignbit_b32 v34, s25, v35, 16 -; SI-NEXT: v_alignbit_b32 v35, s25, v35, 8 -; SI-NEXT: v_alignbit_b32 v37, s23, v39, 24 -; SI-NEXT: v_alignbit_b32 v38, s23, v39, 16 -; SI-NEXT: v_alignbit_b32 v39, s23, v39, 8 -; SI-NEXT: v_alignbit_b32 v48, s21, v50, 24 -; SI-NEXT: v_alignbit_b32 v49, s21, v50, 16 -; SI-NEXT: v_alignbit_b32 v50, s21, v50, 8 -; SI-NEXT: v_alignbit_b32 v51, s19, v53, 24 -; SI-NEXT: v_alignbit_b32 v52, s19, v53, 16 -; SI-NEXT: v_alignbit_b32 v53, s19, v53, 8 -; SI-NEXT: v_alignbit_b32 v54, s17, v40, 24 -; SI-NEXT: v_alignbit_b32 v55, s17, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, s17, v40, 8 -; SI-NEXT: s_lshr_b32 s56, s6, 24 -; SI-NEXT: s_lshr_b32 s57, s6, 16 -; SI-NEXT: s_lshr_b32 s58, s6, 8 -; SI-NEXT: s_lshr_b32 s59, s8, 24 -; SI-NEXT: s_lshr_b32 s60, s8, 16 -; SI-NEXT: s_lshr_b32 s61, s8, 8 -; SI-NEXT: s_lshr_b32 s62, s10, 24 -; SI-NEXT: s_lshr_b32 s63, s10, 16 -; SI-NEXT: s_lshr_b32 s72, s10, 8 -; SI-NEXT: s_lshr_b32 s73, s12, 24 -; SI-NEXT: s_lshr_b32 s74, s12, 16 -; SI-NEXT: s_lshr_b32 s75, s12, 8 -; SI-NEXT: s_lshr_b32 s76, s14, 24 -; SI-NEXT: s_lshr_b32 s77, s14, 16 -; SI-NEXT: s_lshr_b32 s78, s14, 8 -; SI-NEXT: s_lshr_b32 s79, s40, 24 -; SI-NEXT: s_lshr_b32 s88, s40, 16 -; SI-NEXT: s_lshr_b32 s89, s40, 8 -; SI-NEXT: s_lshr_b32 s90, s42, 24 -; SI-NEXT: s_lshr_b32 s91, s42, 16 -; SI-NEXT: s_lshr_b32 s92, s42, 8 -; SI-NEXT: s_lshr_b32 s93, s44, 24 -; SI-NEXT: s_lshr_b32 s94, s44, 16 -; SI-NEXT: s_lshr_b32 s95, s44, 8 -; SI-NEXT: s_lshr_b32 s30, s46, 24 -; SI-NEXT: s_lshr_b32 s31, s46, 
16 -; SI-NEXT: s_lshr_b32 s34, s46, 8 -; SI-NEXT: s_lshr_b32 s35, s29, 24 -; SI-NEXT: s_lshr_b32 s36, s29, 16 -; SI-NEXT: s_lshr_b32 s37, s29, 8 -; SI-NEXT: s_lshr_b32 s38, s27, 24 -; SI-NEXT: s_lshr_b32 s39, s27, 16 -; SI-NEXT: s_lshr_b32 s48, s27, 8 -; SI-NEXT: s_lshr_b32 s49, s25, 24 -; SI-NEXT: s_lshr_b32 s50, s25, 16 -; SI-NEXT: s_lshr_b32 s51, s25, 8 -; SI-NEXT: s_lshr_b32 s52, s23, 24 -; SI-NEXT: s_lshr_b32 s53, s23, 16 -; SI-NEXT: s_lshr_b32 s54, s23, 8 -; SI-NEXT: s_lshr_b32 s55, s21, 24 -; SI-NEXT: s_lshr_b32 s64, s21, 16 -; SI-NEXT: s_lshr_b32 s65, s21, 8 -; SI-NEXT: s_lshr_b32 s66, s19, 24 -; SI-NEXT: s_lshr_b32 s67, s19, 16 -; SI-NEXT: s_lshr_b32 s68, s19, 8 -; SI-NEXT: s_lshr_b32 s69, s17, 24 -; SI-NEXT: s_lshr_b32 s70, s17, 16 -; SI-NEXT: s_lshr_b32 s71, s17, 8 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 40 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 41 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s9, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; 
SI-NEXT: v_writelane_b32 v22, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s43, 24 +; SI-NEXT: v_writelane_b32 v22, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s43, 16 +; SI-NEXT: v_writelane_b32 v22, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v22, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: 
v_writelane_b32 v21, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v21, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v21, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 8 +; SI-NEXT: v_writelane_b32 v21, s46, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 38 +; SI-NEXT: v_writelane_b32 v22, s47, 39 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 36 +; SI-NEXT: v_writelane_b32 v22, s47, 37 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 34 +; SI-NEXT: v_writelane_b32 v22, s47, 35 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 32 +; SI-NEXT: v_writelane_b32 v22, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 30 +; SI-NEXT: v_writelane_b32 v22, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 28 +; SI-NEXT: v_writelane_b32 v22, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 26 +; SI-NEXT: v_writelane_b32 v22, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 24 +; SI-NEXT: v_writelane_b32 v22, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 22 +; SI-NEXT: v_writelane_b32 v22, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 20 +; SI-NEXT: v_writelane_b32 v22, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 18 +; SI-NEXT: v_writelane_b32 v22, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 16 +; SI-NEXT: v_writelane_b32 v22, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: 
v_writelane_b32 v22, s46, 14 +; SI-NEXT: v_writelane_b32 v22, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 12 +; SI-NEXT: v_writelane_b32 v22, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 10 +; SI-NEXT: v_writelane_b32 v22, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 8 +; SI-NEXT: v_writelane_b32 v22, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 6 +; SI-NEXT: v_writelane_b32 v22, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v22, s46, 4 +; SI-NEXT: v_writelane_b32 v22, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v22, s46, 2 +; SI-NEXT: v_writelane_b32 v22, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v22, s46, 0 +; SI-NEXT: s_lshr_b32 s49, s19, 24 +; SI-NEXT: s_lshr_b32 s48, s19, 16 +; SI-NEXT: s_lshr_b32 s50, s19, 8 +; SI-NEXT: s_lshr_b32 s51, s17, 24 +; SI-NEXT: s_lshr_b32 s52, s17, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 8 +; SI-NEXT: s_lshr_b64 s[54:55], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v22, s47, 1 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: 
s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 ; SI-NEXT: .LBB57_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v40 -; SI-NEXT: v_or_b32_e32 v40, s4, v40 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s71, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s70, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s69, 24 -; SI-NEXT: v_and_b32_e32 v55, 0xff, v55 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v55 -; SI-NEXT: v_lshlrev_b32_e32 v54, 24, v54 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v54, v54, v55 -; SI-NEXT: v_mov_b32_e32 v55, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v53, 8, v53 -; SI-NEXT: v_or_b32_e32 v53, s4, v53 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s68, 8 -; SI-NEXT: v_and_b32_e32 v52, 0xff, v52 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s67, 0xff -; SI-NEXT: v_and_b32_e32 v40, 0xffff, v40 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v52 -; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v51 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s66, 24 -; SI-NEXT: v_or_b32_e32 v54, v40, v54 -; SI-NEXT: v_and_b32_e32 v53, 0xffff, v53 -; SI-NEXT: v_or_b32_e32 v51, v51, v52 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: buffer_store_dword v54, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v54, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v51, v53, v51 -; SI-NEXT: 
v_add_i32_e32 v52, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v55, v54, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v51, v52, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v52, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v50, 8, v50 -; SI-NEXT: v_or_b32_e32 v50, s4, v50 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s65, 8 -; SI-NEXT: v_and_b32_e32 v49, 0xff, v49 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s64, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v49 -; SI-NEXT: v_lshlrev_b32_e32 v48, 24, v48 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s55, 24 -; SI-NEXT: v_and_b32_e32 v50, 0xffff, v50 -; SI-NEXT: v_or_b32_e32 v48, v48, v49 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: s_lshl_b32 s47, s38, 8 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: s_and_b32 s47, s36, 0xff +; SI-NEXT: s_lshl_b32 s57, s34, 24 +; SI-NEXT: s_lshl_b32 s47, s47, 16 +; SI-NEXT: s_or_b32 s47, s57, s47 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xff +; SI-NEXT: s_lshl_b32 s17, s53, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s52, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s47, s51, 24 +; SI-NEXT: s_or_b32 s17, s47, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s30, 8 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s94, 0xff +; SI-NEXT: s_lshl_b32 s18, s92, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xff +; SI-NEXT: s_lshl_b32 s17, s50, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; 
SI-NEXT: s_and_b32 s17, s48, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s49, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s90, 8 +; SI-NEXT: s_and_b32 s17, s20, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s88, 0xff +; SI-NEXT: s_lshl_b32 s18, s78, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 17 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 16 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 15 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v6, s16 +; SI-NEXT: s_lshl_b32 s16, s76, 8 +; SI-NEXT: s_and_b32 s17, s22, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s74, 0xff +; SI-NEXT: s_lshl_b32 s18, s72, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 14 +; SI-NEXT: v_mov_b32_e32 v7, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 13 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 12 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v8, s16 +; SI-NEXT: s_lshl_b32 s16, s62, 8 +; SI-NEXT: s_and_b32 s17, s24, 0xff +; 
SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s60, 0xff +; SI-NEXT: s_lshl_b32 s18, s58, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 11 +; SI-NEXT: v_mov_b32_e32 v9, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 10 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 9 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v10, s16 +; SI-NEXT: s_lshl_b32 s16, s56, 8 +; SI-NEXT: s_and_b32 s17, s26, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s46, 0xff +; SI-NEXT: s_lshl_b32 s18, s98, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 8 +; SI-NEXT: v_mov_b32_e32 v11, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 7 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 6 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v12, s16 +; SI-NEXT: s_lshl_b32 s16, s96, 8 +; SI-NEXT: s_and_b32 s17, s28, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s86, 0xff +; SI-NEXT: s_lshl_b32 s18, s84, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 5 +; SI-NEXT: v_mov_b32_e32 v13, s16 
+; SI-NEXT: s_and_b32 s16, s29, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 4 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 3 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v14, s16 +; SI-NEXT: s_lshl_b32 s16, s82, 8 +; SI-NEXT: s_and_b32 s17, s44, 0xff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s80, 0xff +; SI-NEXT: s_lshl_b32 s18, s70, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 2 +; SI-NEXT: v_mov_b32_e32 v15, s16 +; SI-NEXT: s_and_b32 s16, s45, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v21, 1 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 0 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v51, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v48, v50, v48 -; SI-NEXT: v_add_i32_e32 v49, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v52, v51, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v48, v49, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v49, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v39, 8, v39 -; SI-NEXT: v_or_b32_e32 v39, s4, v39 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s54, 8 -; SI-NEXT: v_and_b32_e32 v38, 0xff, v38 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s53, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v38 -; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v37 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s52, 24 -; SI-NEXT: v_and_b32_e32 v39, 0xffff, v39 -; SI-NEXT: v_or_b32_e32 v37, v37, v38 -; SI-NEXT: 
s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: v_mov_b32_e32 v16, s16 +; SI-NEXT: s_lshl_b32 s16, s68, 8 +; SI-NEXT: s_and_b32 s17, s42, 0xff +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s66, 0xff +; SI-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_lshl_b32 s18, s64, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: buffer_store_dword v7, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: buffer_store_dword v8, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v9, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: v_readlane_b32 s17, v22, 63 +; SI-NEXT: buffer_store_dword v10, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: buffer_store_dword v11, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 62 +; SI-NEXT: buffer_store_dword v12, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, 
v22, 61 +; SI-NEXT: buffer_store_dword v13, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: buffer_store_dword v14, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v22, 0 +; SI-NEXT: buffer_store_dword v15, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: v_readlane_b32 s19, v22, 1 +; SI-NEXT: buffer_store_dword v16, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_lshl_b32 s17, s18, 8 +; SI-NEXT: v_readlane_b32 s18, v22, 2 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v48, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v37, v39, v37 -; SI-NEXT: v_add_i32_e32 v38, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v49, v48, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v37, v38, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v38, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v35, 8, v35 -; SI-NEXT: v_or_b32_e32 v35, s4, v35 -; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s51, 8 -; SI-NEXT: v_and_b32_e32 v34, 0xff, v34 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s50, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34 -; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v33 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s49, 24 -; SI-NEXT: v_and_b32_e32 v35, 0xffff, v35 -; SI-NEXT: v_or_b32_e32 v33, v33, v34 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xff +; SI-NEXT: v_readlane_b32 s19, v22, 3 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 4 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: 
s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 60 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v37, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v33, v35, v33 -; SI-NEXT: v_add_i32_e32 v34, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v38, v37, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v33, v34, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v34, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v29, 8, v29 -; SI-NEXT: v_or_b32_e32 v29, s4, v29 -; SI-NEXT: s_and_b32 s4, s27, 0xff -; SI-NEXT: s_lshl_b32 s5, s48, 8 -; SI-NEXT: v_and_b32_e32 v28, 0xff, v28 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s39, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s38, 24 -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v22, 59 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 58 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v33, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v27, v29, v27 -; SI-NEXT: v_add_i32_e32 v28, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v34, v33, s[0:3], 0 offen -; 
SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v28, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; SI-NEXT: v_or_b32_e32 v23, s4, v23 -; SI-NEXT: s_and_b32 s4, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s37, 8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: v_readlane_b32 s16, v22, 6 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s17, v22, 7 +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: v_readlane_b32 s19, v22, 5 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: v_readlane_b32 s16, v22, 8 +; SI-NEXT: v_readlane_b32 s17, v22, 9 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: v_readlane_b32 s18, v22, 10 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s18, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v27, vcc, 44, v0 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s36, 0xff -; SI-NEXT: buffer_store_dword v28, v27, s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v36 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s35, 24 -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 -; SI-NEXT: v_or_b32_e32 v22, v27, v22 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v23, v22 -; SI-NEXT: v_add_i32_e32 v23, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: v_readlane_b32 s15, v22, 57 +; SI-NEXT: s_lshl_b32 s15, s15, 8 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: v_readlane_b32 s15, v22, 56 +; SI-NEXT: s_and_b32 s15, s15, 0xff +; SI-NEXT: v_readlane_b32 s16, v22, 55 +; SI-NEXT: 
s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_lshl_b32 s16, s16, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s47, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v32 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s46, 0xff -; SI-NEXT: s_lshl_b32 s5, s34, 8 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: v_readlane_b32 s14, v22, 12 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s15, v22, 13 +; SI-NEXT: s_lshl_b32 s14, s14, 8 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: v_readlane_b32 s14, v22, 14 +; SI-NEXT: v_readlane_b32 s15, v22, 15 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s16, v22, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s16, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xff, v31 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s31, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v30 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s30, 24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_or_b32_e32 v23, v27, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: v_readlane_b32 s13, 
v22, 54 +; SI-NEXT: s_lshl_b32 s13, s13, 8 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_readlane_b32 s13, v22, 53 +; SI-NEXT: s_and_b32 s13, s13, 0xff +; SI-NEXT: v_readlane_b32 s14, v22, 52 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 60, v0 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s45, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v26 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s44, 0xff -; SI-NEXT: s_lshl_b32 s5, s95, 8 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: v_readlane_b32 s12, v22, 18 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s13, v22, 19 +; SI-NEXT: s_lshl_b32 s12, s12, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: v_readlane_b32 s12, v22, 20 +; SI-NEXT: v_readlane_b32 s13, v22, 21 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s14, v22, 22 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s14, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xff, v25 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s94, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v24 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s93, 24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_or_b32_e32 v23, v24, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 
v23, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_or_b32_e32 v21, s4, v21 -; SI-NEXT: s_and_b32 s4, s42, 0xff -; SI-NEXT: s_lshl_b32 s5, s92, 8 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s91, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s90, 24 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: v_readlane_b32 s11, v22, 51 +; SI-NEXT: s_lshl_b32 s11, s11, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: v_readlane_b32 s11, v22, 50 +; SI-NEXT: s_and_b32 s11, s11, 0xff +; SI-NEXT: v_readlane_b32 s12, v22, 49 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s12, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v19, v21, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v20, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 -; SI-NEXT: v_or_b32_e32 v18, s4, v18 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s5, s89, 8 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s88, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 
-; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s79, 24 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: v_readlane_b32 s10, v22, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s11, v22, 25 +; SI-NEXT: s_lshl_b32 s10, s10, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: v_readlane_b32 s10, v22, 26 +; SI-NEXT: v_readlane_b32 s11, v22, 27 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s12, v22, 28 +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s12, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v19, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v16, v18, v16 -; SI-NEXT: v_add_i32_e32 v17, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v20, v19, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_lshl_b32 s5, s78, 8 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s77, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s76, 24 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: v_readlane_b32 s9, v22, 48 +; SI-NEXT: s_lshl_b32 s9, s9, 8 
+; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_readlane_b32 s9, v22, 47 +; SI-NEXT: s_and_b32 s9, s9, 0xff +; SI-NEXT: v_readlane_b32 s10, v22, 46 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s10, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s12, 0xff -; SI-NEXT: s_lshl_b32 s5, s75, 8 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s74, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s73, 24 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_readlane_b32 s8, v22, 30 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: v_readlane_b32 s9, v22, 31 +; SI-NEXT: s_lshl_b32 s8, s8, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: v_readlane_b32 s8, v22, 32 +; SI-NEXT: v_readlane_b32 s9, v22, 33 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s10, v22, 34 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s10, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v11, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s5, s72, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s63, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s10, s62, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s10, s5 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: v_readlane_b32 s7, v22, 45 +; SI-NEXT: s_lshl_b32 s7, s7, 8 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_readlane_b32 s7, v22, 44 +; SI-NEXT: s_and_b32 s7, s7, 0xff +; SI-NEXT: v_readlane_b32 s8, v22, 43 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s8, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s61, 8 -; 
SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s60, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s8, s59, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: v_readlane_b32 s6, v22, 36 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 8 +; SI-NEXT: v_readlane_b32 s7, v22, 37 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s54, 0xff +; SI-NEXT: v_readlane_b32 s8, v22, 38 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s8, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s6, 0xff -; SI-NEXT: s_lshl_b32 s5, s58, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: v_readlane_b32 s5, v22, 42 +; SI-NEXT: s_lshl_b32 s5, s5, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s57, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: v_readlane_b32 s5, v22, 41 +; SI-NEXT: s_and_b32 s5, s5, 0xff +; SI-NEXT: v_readlane_b32 s6, v22, 40 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s56, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: 
v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s6, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x74, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_readlane_b32 s19, v22, 11 +; SI-NEXT: v_readlane_b32 s17, v22, 17 +; SI-NEXT: v_readlane_b32 s15, v22, 23 +; SI-NEXT: v_readlane_b32 s13, v22, 29 +; SI-NEXT: v_readlane_b32 s11, v22, 35 +; SI-NEXT: v_readlane_b32 s9, v22, 39 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s71, v41, 23 -; SI-NEXT: v_readlane_b32 s70, v41, 22 -; SI-NEXT: v_readlane_b32 s69, v41, 21 -; SI-NEXT: v_readlane_b32 s68, v41, 20 -; SI-NEXT: v_readlane_b32 s67, v41, 19 -; SI-NEXT: v_readlane_b32 s66, v41, 18 -; SI-NEXT: v_readlane_b32 s65, v41, 17 -; SI-NEXT: v_readlane_b32 s64, v41, 16 -; SI-NEXT: v_readlane_b32 s55, v41, 15 -; SI-NEXT: v_readlane_b32 s54, v41, 14 -; SI-NEXT: v_readlane_b32 s53, v41, 13 -; SI-NEXT: v_readlane_b32 s52, v41, 12 -; SI-NEXT: v_readlane_b32 s51, v41, 11 -; SI-NEXT: v_readlane_b32 s50, v41, 10 -; SI-NEXT: v_readlane_b32 s49, v41, 9 -; SI-NEXT: v_readlane_b32 s48, v41, 8 -; SI-NEXT: v_readlane_b32 s39, v41, 7 -; SI-NEXT: v_readlane_b32 s38, v41, 6 -; SI-NEXT: v_readlane_b32 s37, v41, 5 -; SI-NEXT: v_readlane_b32 s36, v41, 4 -; SI-NEXT: v_readlane_b32 s35, v41, 3 -; SI-NEXT: v_readlane_b32 s34, v41, 2 -; SI-NEXT: v_readlane_b32 s31, v41, 1 -; SI-NEXT: v_readlane_b32 s30, v41, 0 -; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: 
buffer_load_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s99, v20, 35 +; SI-NEXT: v_readlane_b32 s98, v20, 34 +; SI-NEXT: v_readlane_b32 s97, v20, 33 +; SI-NEXT: v_readlane_b32 s96, v20, 32 +; SI-NEXT: v_readlane_b32 s87, v20, 31 +; SI-NEXT: v_readlane_b32 s86, v20, 30 +; SI-NEXT: v_readlane_b32 s85, v20, 29 +; SI-NEXT: v_readlane_b32 s84, v20, 28 +; SI-NEXT: v_readlane_b32 s83, v20, 27 +; SI-NEXT: v_readlane_b32 s82, v20, 26 +; SI-NEXT: v_readlane_b32 s81, v20, 25 +; SI-NEXT: v_readlane_b32 s80, v20, 24 +; SI-NEXT: v_readlane_b32 s71, v20, 23 +; SI-NEXT: v_readlane_b32 s70, v20, 22 +; SI-NEXT: v_readlane_b32 s69, v20, 21 +; SI-NEXT: v_readlane_b32 s68, v20, 20 +; SI-NEXT: v_readlane_b32 s67, v20, 19 +; SI-NEXT: v_readlane_b32 s66, v20, 18 +; SI-NEXT: v_readlane_b32 s65, v20, 17 +; SI-NEXT: v_readlane_b32 s64, v20, 16 +; SI-NEXT: v_readlane_b32 s55, v20, 15 +; SI-NEXT: v_readlane_b32 s54, v20, 14 +; SI-NEXT: v_readlane_b32 s53, v20, 13 +; SI-NEXT: v_readlane_b32 s52, v20, 12 +; SI-NEXT: v_readlane_b32 s51, v20, 11 +; SI-NEXT: v_readlane_b32 s50, v20, 10 +; SI-NEXT: v_readlane_b32 s49, v20, 9 +; SI-NEXT: v_readlane_b32 s48, v20, 8 +; SI-NEXT: v_readlane_b32 s39, v20, 7 +; SI-NEXT: v_readlane_b32 s38, v20, 6 +; SI-NEXT: v_readlane_b32 s37, v20, 5 +; SI-NEXT: v_readlane_b32 s36, v20, 4 +; SI-NEXT: v_readlane_b32 s35, v20, 3 +; SI-NEXT: v_readlane_b32 s34, v20, 2 +; SI-NEXT: v_readlane_b32 s31, v20, 1 +; SI-NEXT: v_readlane_b32 s30, v20, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; 
implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $sgpr71 -; SI-NEXT: ; implicit-def: $sgpr70 -; SI-NEXT: ; implicit-def: $sgpr69 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $sgpr68 -; SI-NEXT: ; implicit-def: $sgpr67 -; SI-NEXT: ; implicit-def: $sgpr66 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $sgpr65 -; SI-NEXT: ; implicit-def: $sgpr64 -; SI-NEXT: ; implicit-def: $sgpr55 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v22, s54, 0 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s55, 1 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 ; SI-NEXT: ; implicit-def: $sgpr54 ; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $sgpr52 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr50 -; SI-NEXT: ; implicit-def: $sgpr49 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $sgpr48 -; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr49 ; SI-NEXT: ; implicit-def: $sgpr38 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr36 -; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr34 -; SI-NEXT: ; implicit-def: $sgpr31 ; SI-NEXT: ; implicit-def: $sgpr30 -; SI-NEXT: ; implicit-def: $sgpr95 ; 
SI-NEXT: ; implicit-def: $sgpr94 -; SI-NEXT: ; implicit-def: $sgpr93 ; SI-NEXT: ; implicit-def: $sgpr92 -; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr90 -; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr88 -; SI-NEXT: ; implicit-def: $sgpr79 ; SI-NEXT: ; implicit-def: $sgpr78 -; SI-NEXT: ; implicit-def: $sgpr77 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $sgpr73 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $sgpr63 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: s_branch .LBB57_2 -; -; VI-LABEL: bitcast_v16i64_to_v128i8_scalar: -; VI: ; %bb.0: -; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: s_xor_saveexec_b64 s[4:5], 
-1 -; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill -; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; VI-NEXT: s_mov_b64 exec, s[4:5] -; VI-NEXT: v_writelane_b32 v20, s30, 0 -; VI-NEXT: v_writelane_b32 v20, s31, 1 -; VI-NEXT: v_writelane_b32 v20, s34, 2 -; VI-NEXT: v_writelane_b32 v20, s35, 3 -; VI-NEXT: v_writelane_b32 v20, s36, 4 -; VI-NEXT: v_writelane_b32 v20, s37, 5 -; VI-NEXT: v_writelane_b32 v20, s38, 6 -; VI-NEXT: v_writelane_b32 v20, s39, 7 -; VI-NEXT: v_writelane_b32 v20, s48, 8 -; VI-NEXT: v_writelane_b32 v20, s49, 9 -; VI-NEXT: v_writelane_b32 v20, s50, 10 -; VI-NEXT: v_writelane_b32 v20, s51, 11 -; VI-NEXT: v_writelane_b32 v20, s52, 12 -; VI-NEXT: v_writelane_b32 v20, s53, 13 -; VI-NEXT: v_writelane_b32 v20, s54, 14 -; VI-NEXT: v_writelane_b32 v20, s55, 15 -; VI-NEXT: v_writelane_b32 v20, s64, 16 -; VI-NEXT: v_writelane_b32 v20, s65, 17 -; VI-NEXT: v_writelane_b32 v20, s66, 18 -; VI-NEXT: v_writelane_b32 v20, s67, 19 -; VI-NEXT: v_writelane_b32 v20, s68, 20 -; VI-NEXT: v_writelane_b32 v20, s69, 21 -; VI-NEXT: v_writelane_b32 v20, s70, 22 -; VI-NEXT: v_writelane_b32 v20, s71, 23 -; VI-NEXT: v_writelane_b32 v20, s80, 24 -; VI-NEXT: v_writelane_b32 v20, s81, 25 -; VI-NEXT: v_writelane_b32 v20, s82, 26 -; VI-NEXT: v_writelane_b32 v20, s83, 27 -; VI-NEXT: v_writelane_b32 v20, s84, 28 -; VI-NEXT: v_writelane_b32 v20, s85, 29 -; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; VI-NEXT: v_writelane_b32 v20, s86, 30 -; VI-NEXT: v_readfirstlane_b32 s44, v1 -; VI-NEXT: v_readfirstlane_b32 s45, v2 -; VI-NEXT: v_readfirstlane_b32 s42, v3 -; VI-NEXT: v_readfirstlane_b32 s43, v4 -; VI-NEXT: v_readfirstlane_b32 s40, v5 -; VI-NEXT: v_readfirstlane_b32 s41, v6 -; VI-NEXT: v_readfirstlane_b32 s14, v7 -; VI-NEXT: v_readfirstlane_b32 s15, v8 -; VI-NEXT: v_readfirstlane_b32 s12, v9 -; VI-NEXT: v_readfirstlane_b32 s13, v10 -; VI-NEXT: v_readfirstlane_b32 s10, v11 -; VI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: ; 
implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 2 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s55, 3 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 4 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s55, 5 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 6 +; SI-NEXT: v_writelane_b32 v22, s55, 7 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 8 +; SI-NEXT: v_writelane_b32 v22, s55, 9 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 10 +; SI-NEXT: v_writelane_b32 v22, s55, 11 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 12 +; SI-NEXT: v_writelane_b32 v22, s55, 13 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: 
$sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 14 +; SI-NEXT: v_writelane_b32 v22, s55, 15 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 16 +; SI-NEXT: v_writelane_b32 v22, s55, 17 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 18 +; SI-NEXT: v_writelane_b32 v22, s55, 19 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 20 +; SI-NEXT: v_writelane_b32 v22, s55, 21 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 22 +; SI-NEXT: v_writelane_b32 v22, s55, 23 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 24 +; SI-NEXT: v_writelane_b32 v22, s55, 25 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 26 +; SI-NEXT: v_writelane_b32 v22, s55, 27 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 28 +; SI-NEXT: v_writelane_b32 v22, s55, 29 +; SI-NEXT: ; 
implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 30 +; SI-NEXT: v_writelane_b32 v22, s55, 31 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 32 +; SI-NEXT: v_writelane_b32 v22, s55, 33 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 34 +; SI-NEXT: v_writelane_b32 v22, s55, 35 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 36 +; SI-NEXT: v_writelane_b32 v22, s55, 37 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v22, s54, 38 +; SI-NEXT: v_writelane_b32 v22, s55, 39 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: s_branch .LBB57_2 +; +; VI-LABEL: bitcast_v16i64_to_v128i8_scalar: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; VI-NEXT: s_mov_b64 exec, s[4:5] +; VI-NEXT: v_writelane_b32 v20, s30, 0 +; VI-NEXT: v_writelane_b32 v20, s31, 1 +; VI-NEXT: v_writelane_b32 v20, s34, 2 +; VI-NEXT: v_writelane_b32 v20, s35, 3 +; VI-NEXT: v_writelane_b32 v20, s36, 4 +; VI-NEXT: v_writelane_b32 v20, s37, 5 +; VI-NEXT: v_writelane_b32 v20, s38, 6 +; VI-NEXT: 
v_writelane_b32 v20, s39, 7 +; VI-NEXT: v_writelane_b32 v20, s48, 8 +; VI-NEXT: v_writelane_b32 v20, s49, 9 +; VI-NEXT: v_writelane_b32 v20, s50, 10 +; VI-NEXT: v_writelane_b32 v20, s51, 11 +; VI-NEXT: v_writelane_b32 v20, s52, 12 +; VI-NEXT: v_writelane_b32 v20, s53, 13 +; VI-NEXT: v_writelane_b32 v20, s54, 14 +; VI-NEXT: v_writelane_b32 v20, s55, 15 +; VI-NEXT: v_writelane_b32 v20, s64, 16 +; VI-NEXT: v_writelane_b32 v20, s65, 17 +; VI-NEXT: v_writelane_b32 v20, s66, 18 +; VI-NEXT: v_writelane_b32 v20, s67, 19 +; VI-NEXT: v_writelane_b32 v20, s68, 20 +; VI-NEXT: v_writelane_b32 v20, s69, 21 +; VI-NEXT: v_writelane_b32 v20, s70, 22 +; VI-NEXT: v_writelane_b32 v20, s71, 23 +; VI-NEXT: v_writelane_b32 v20, s80, 24 +; VI-NEXT: v_writelane_b32 v20, s81, 25 +; VI-NEXT: v_writelane_b32 v20, s82, 26 +; VI-NEXT: v_writelane_b32 v20, s83, 27 +; VI-NEXT: v_writelane_b32 v20, s84, 28 +; VI-NEXT: v_writelane_b32 v20, s85, 29 +; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 +; VI-NEXT: v_writelane_b32 v20, s86, 30 +; VI-NEXT: v_readfirstlane_b32 s44, v1 +; VI-NEXT: v_readfirstlane_b32 s45, v2 +; VI-NEXT: v_readfirstlane_b32 s42, v3 +; VI-NEXT: v_readfirstlane_b32 s43, v4 +; VI-NEXT: v_readfirstlane_b32 s40, v5 +; VI-NEXT: v_readfirstlane_b32 s41, v6 +; VI-NEXT: v_readfirstlane_b32 s14, v7 +; VI-NEXT: v_readfirstlane_b32 s15, v8 +; VI-NEXT: v_readfirstlane_b32 s12, v9 +; VI-NEXT: v_readfirstlane_b32 s13, v10 +; VI-NEXT: v_readfirstlane_b32 s10, v11 +; VI-NEXT: v_readfirstlane_b32 s11, v12 ; VI-NEXT: v_readfirstlane_b32 s8, v13 ; VI-NEXT: v_readfirstlane_b32 s9, v14 ; VI-NEXT: v_readfirstlane_b32 s6, v15 @@ -105964,385 +107224,431 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v16i64_to_v64i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: 
s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v20, s30, 0 +; SI-NEXT: v_writelane_b32 v20, s31, 1 +; SI-NEXT: v_writelane_b32 v20, s34, 2 +; SI-NEXT: v_writelane_b32 v20, s35, 3 +; SI-NEXT: v_writelane_b32 v20, s36, 4 +; SI-NEXT: v_writelane_b32 v20, s37, 5 +; SI-NEXT: v_writelane_b32 v20, s38, 6 +; SI-NEXT: v_writelane_b32 v20, s39, 7 +; SI-NEXT: v_writelane_b32 v20, s48, 8 +; SI-NEXT: v_writelane_b32 v20, s49, 9 +; SI-NEXT: v_writelane_b32 v20, s50, 10 +; SI-NEXT: v_writelane_b32 v20, s51, 11 +; SI-NEXT: v_writelane_b32 v20, s52, 12 +; SI-NEXT: v_writelane_b32 v20, s53, 13 +; SI-NEXT: v_writelane_b32 v20, s54, 14 +; SI-NEXT: v_writelane_b32 v20, s55, 15 +; SI-NEXT: v_writelane_b32 v20, s64, 16 +; SI-NEXT: v_writelane_b32 v20, s65, 17 +; SI-NEXT: v_writelane_b32 v20, s66, 18 +; SI-NEXT: v_writelane_b32 v20, s67, 19 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_readfirstlane_b32 s47, v1 -; SI-NEXT: v_readfirstlane_b32 s46, v2 -; SI-NEXT: v_readfirstlane_b32 s45, v3 -; SI-NEXT: v_readfirstlane_b32 s44, v4 -; SI-NEXT: v_readfirstlane_b32 s43, v5 -; SI-NEXT: v_readfirstlane_b32 s42, v6 -; SI-NEXT: v_readfirstlane_b32 s41, v7 -; SI-NEXT: v_readfirstlane_b32 s40, v8 -; SI-NEXT: v_readfirstlane_b32 s15, v9 -; SI-NEXT: v_readfirstlane_b32 s14, v10 -; SI-NEXT: v_readfirstlane_b32 s13, v11 -; SI-NEXT: v_readfirstlane_b32 s12, v12 -; SI-NEXT: v_readfirstlane_b32 s11, v13 -; SI-NEXT: v_readfirstlane_b32 s10, v14 -; SI-NEXT: v_readfirstlane_b32 s9, v15 -; SI-NEXT: v_readfirstlane_b32 s8, v16 -; SI-NEXT: v_readfirstlane_b32 s7, v17 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v18 +; SI-NEXT: v_writelane_b32 v20, s68, 20 +; SI-NEXT: v_readfirstlane_b32 s44, v1 +; SI-NEXT: v_readfirstlane_b32 s45, v2 +; SI-NEXT: v_readfirstlane_b32 s42, v3 +; SI-NEXT: v_readfirstlane_b32 s43, v4 +; SI-NEXT: v_readfirstlane_b32 s40, v5 +; SI-NEXT: v_readfirstlane_b32 s41, v6 +; SI-NEXT: v_readfirstlane_b32 s14, v7 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; 
SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s10, v11 +; SI-NEXT: v_readfirstlane_b32 s11, v12 +; SI-NEXT: v_readfirstlane_b32 s8, v13 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: v_readfirstlane_b32 s6, v15 +; SI-NEXT: v_readfirstlane_b32 s7, v16 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: v_writelane_b32 v20, s69, 21 ; SI-NEXT: s_cbranch_scc0 .LBB69_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s47 -; SI-NEXT: v_mov_b32_e32 v10, s28 -; SI-NEXT: v_mov_b32_e32 v11, s26 -; SI-NEXT: v_mov_b32_e32 v12, s24 -; SI-NEXT: v_mov_b32_e32 v13, s22 -; SI-NEXT: v_mov_b32_e32 v14, s20 -; SI-NEXT: v_mov_b32_e32 v15, s18 -; SI-NEXT: v_mov_b32_e32 v16, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s46, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s29, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s27, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s23, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s21, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s19, v15, 16 -; SI-NEXT: v_alignbit_b32 v16, s17, v16, 16 -; SI-NEXT: s_lshr_b32 s56, s6, 16 -; SI-NEXT: s_lshr_b32 s57, s8, 16 -; SI-NEXT: s_lshr_b32 s58, s10, 16 -; SI-NEXT: s_lshr_b32 s59, s12, 16 -; SI-NEXT: s_lshr_b32 s60, s14, 16 -; SI-NEXT: 
s_lshr_b32 s61, s40, 16 -; SI-NEXT: s_lshr_b32 s62, s42, 16 -; SI-NEXT: s_lshr_b32 s63, s44, 16 -; SI-NEXT: s_lshr_b32 s72, s46, 16 -; SI-NEXT: s_lshr_b32 s73, s29, 16 -; SI-NEXT: s_lshr_b32 s74, s27, 16 -; SI-NEXT: s_lshr_b32 s75, s25, 16 -; SI-NEXT: s_lshr_b32 s76, s23, 16 -; SI-NEXT: s_lshr_b32 s77, s21, 16 -; SI-NEXT: s_lshr_b32 s78, s19, 16 -; SI-NEXT: s_lshr_b32 s79, s17, 16 +; SI-NEXT: s_lshr_b32 s38, s5, 16 +; SI-NEXT: s_lshr_b32 s39, s7, 16 +; SI-NEXT: s_lshr_b32 s48, s9, 16 +; SI-NEXT: s_lshr_b32 s49, s11, 16 +; SI-NEXT: s_lshr_b32 s50, s13, 16 +; SI-NEXT: s_lshr_b32 s51, s15, 16 +; SI-NEXT: s_lshr_b32 s52, s41, 16 +; SI-NEXT: s_lshr_b32 s53, s43, 16 +; SI-NEXT: s_lshr_b32 s54, s45, 16 +; SI-NEXT: s_lshr_b32 s55, s29, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 16 +; SI-NEXT: s_lshr_b32 s66, s23, 16 +; SI-NEXT: s_lshr_b32 s67, s21, 16 +; SI-NEXT: s_lshr_b32 s68, s19, 16 +; SI-NEXT: s_lshr_b32 s69, s17, 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB69_3 ; SI-NEXT: .LBB69_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 
-; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 +; SI-NEXT: s_add_u32 s14, s14, 3 +; SI-NEXT: s_addc_u32 s15, s15, 0 +; SI-NEXT: s_add_u32 s40, s40, 3 +; SI-NEXT: s_addc_u32 s41, s41, 0 +; SI-NEXT: s_add_u32 s42, s42, 3 +; SI-NEXT: s_addc_u32 s43, s43, 0 +; SI-NEXT: s_add_u32 s44, s44, 3 +; SI-NEXT: s_addc_u32 s45, s45, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s47, s47, 3 -; SI-NEXT: s_addc_u32 s46, s46, 0 -; SI-NEXT: s_add_u32 s45, s45, 3 -; SI-NEXT: s_addc_u32 s44, s44, 0 -; SI-NEXT: s_add_u32 s43, s43, 3 -; SI-NEXT: s_addc_u32 s42, s42, 0 -; SI-NEXT: s_add_u32 s41, s41, 3 -; SI-NEXT: s_addc_u32 s40, s40, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s47 -; SI-NEXT: v_mov_b32_e32 v10, s28 -; SI-NEXT: v_mov_b32_e32 v11, s26 -; SI-NEXT: v_mov_b32_e32 v12, s24 -; SI-NEXT: v_mov_b32_e32 v13, s22 -; SI-NEXT: v_mov_b32_e32 v14, s20 -; SI-NEXT: v_mov_b32_e32 v15, s18 -; SI-NEXT: 
v_mov_b32_e32 v16, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s46, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s29, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s27, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s23, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s21, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s19, v15, 16 -; SI-NEXT: v_alignbit_b32 v16, s17, v16, 16 -; SI-NEXT: s_lshr_b32 s56, s6, 16 -; SI-NEXT: s_lshr_b32 s57, s8, 16 -; SI-NEXT: s_lshr_b32 s58, s10, 16 -; SI-NEXT: s_lshr_b32 s59, s12, 16 -; SI-NEXT: s_lshr_b32 s60, s14, 16 -; SI-NEXT: s_lshr_b32 s61, s40, 16 -; SI-NEXT: s_lshr_b32 s62, s42, 16 -; SI-NEXT: s_lshr_b32 s63, s44, 16 -; SI-NEXT: s_lshr_b32 s72, s46, 16 -; SI-NEXT: s_lshr_b32 s73, s29, 16 -; SI-NEXT: s_lshr_b32 s74, s27, 16 -; SI-NEXT: s_lshr_b32 s75, s25, 16 -; SI-NEXT: s_lshr_b32 s76, s23, 16 -; SI-NEXT: s_lshr_b32 s77, s21, 16 -; SI-NEXT: s_lshr_b32 s78, s19, 16 -; SI-NEXT: s_lshr_b32 s79, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s38, s5, 16 +; SI-NEXT: s_lshr_b32 s39, s7, 16 +; SI-NEXT: s_lshr_b32 s48, s9, 16 +; SI-NEXT: s_lshr_b32 s49, s11, 16 +; SI-NEXT: s_lshr_b32 s50, s13, 16 +; SI-NEXT: s_lshr_b32 s51, s15, 16 +; SI-NEXT: s_lshr_b32 s52, s41, 16 +; SI-NEXT: s_lshr_b32 s53, s43, 16 +; 
SI-NEXT: s_lshr_b32 s54, s45, 16 +; SI-NEXT: s_lshr_b32 s55, s29, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 16 +; SI-NEXT: s_lshr_b32 s66, s23, 16 +; SI-NEXT: s_lshr_b32 s67, s21, 16 +; SI-NEXT: s_lshr_b32 s68, s19, 16 +; SI-NEXT: s_lshr_b32 s69, s17, 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 ; SI-NEXT: .LBB69_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_or_b32_e32 v16, s4, v16 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s79, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: buffer_store_dword v16, v0, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s47, s36, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s47 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s69, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s34, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s68, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: 
v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s30, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s78, 16 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v16, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v16, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s67, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s94, 16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s77, 16 -; SI-NEXT: buffer_store_dword v16, v15, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v15, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_mov_b32_e32 v2, s16 
+; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s66, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s75, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s65, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: 
buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s47, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s64, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s46, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s45, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s44, 
0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s55, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s44, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s45, 0xffff +; SI-NEXT: s_lshl_b32 s17, s54, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: 
buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s42, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, 
s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s51, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, 
s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: 
s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s69, v20, 21 +; SI-NEXT: v_readlane_b32 s68, v20, 20 +; SI-NEXT: v_readlane_b32 s67, v20, 19 +; SI-NEXT: v_readlane_b32 s66, v20, 18 +; SI-NEXT: v_readlane_b32 s65, v20, 17 +; SI-NEXT: v_readlane_b32 s64, v20, 16 +; SI-NEXT: v_readlane_b32 s55, v20, 15 +; SI-NEXT: v_readlane_b32 s54, v20, 14 +; SI-NEXT: v_readlane_b32 s53, v20, 13 +; SI-NEXT: v_readlane_b32 s52, v20, 12 +; SI-NEXT: v_readlane_b32 s51, v20, 11 +; SI-NEXT: v_readlane_b32 s50, v20, 10 +; SI-NEXT: v_readlane_b32 s49, v20, 9 +; SI-NEXT: v_readlane_b32 s48, v20, 8 +; SI-NEXT: v_readlane_b32 s39, v20, 7 +; SI-NEXT: v_readlane_b32 s38, v20, 6 +; SI-NEXT: v_readlane_b32 s37, v20, 5 +; SI-NEXT: v_readlane_b32 s36, v20, 4 +; SI-NEXT: v_readlane_b32 s35, v20, 3 +; SI-NEXT: v_readlane_b32 s34, v20, 2 +; SI-NEXT: v_readlane_b32 s31, v20, 1 +; SI-NEXT: v_readlane_b32 s30, v20, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB69_4: -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr79 -; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: 
$sgpr34 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr55 ; SI-NEXT: ; implicit-def: $sgpr78 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr77 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr54 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr75 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr52 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr50 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr46 ; SI-NEXT: s_branch .LBB69_2 ; ; VI-LABEL: bitcast_v16i64_to_v64i16_scalar: @@ -112766,656 +114072,1312 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a, ; SI-LABEL: 
bitcast_v16f64_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_writelane_b32 v63, s30, 0 +; SI-NEXT: v_writelane_b32 v63, s31, 1 +; SI-NEXT: v_writelane_b32 v63, s34, 2 +; SI-NEXT: v_writelane_b32 v63, s35, 3 +; SI-NEXT: v_writelane_b32 v63, s36, 4 +; SI-NEXT: v_writelane_b32 v63, s37, 5 +; SI-NEXT: v_writelane_b32 v63, s38, 6 +; SI-NEXT: v_writelane_b32 v63, s39, 7 +; SI-NEXT: v_writelane_b32 v63, s48, 8 +; SI-NEXT: v_writelane_b32 v63, s49, 9 +; SI-NEXT: v_writelane_b32 v63, s50, 10 +; SI-NEXT: v_writelane_b32 v63, s51, 11 +; SI-NEXT: v_writelane_b32 v63, s52, 12 +; SI-NEXT: v_writelane_b32 v63, s53, 13 +; SI-NEXT: v_writelane_b32 v63, s54, 14 +; SI-NEXT: v_writelane_b32 v63, s55, 15 +; SI-NEXT: v_writelane_b32 v63, s64, 16 +; SI-NEXT: v_writelane_b32 v63, s65, 17 +; SI-NEXT: v_writelane_b32 v63, s66, 18 +; SI-NEXT: v_writelane_b32 v63, s67, 19 +; SI-NEXT: v_writelane_b32 v63, s68, 20 +; SI-NEXT: v_writelane_b32 v63, s69, 21 +; SI-NEXT: v_writelane_b32 v63, s70, 22 +; SI-NEXT: v_writelane_b32 v63, s71, 23 +; SI-NEXT: v_writelane_b32 v63, s80, 24 +; SI-NEXT: v_writelane_b32 v63, s81, 25 +; SI-NEXT: v_writelane_b32 v63, s82, 26 +; SI-NEXT: v_writelane_b32 v63, s83, 27 +; SI-NEXT: v_writelane_b32 v63, s84, 28 +; SI-NEXT: v_writelane_b32 v63, s85, 29 +; SI-NEXT: v_writelane_b32 v63, s86, 30 +; SI-NEXT: v_writelane_b32 v63, s87, 31 +; SI-NEXT: v_writelane_b32 v63, s96, 32 +; SI-NEXT: v_writelane_b32 v63, s97, 33 +; SI-NEXT: v_writelane_b32 v63, s98, 34 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_mov_b32_e32 v31, s16 -; SI-NEXT: v_mov_b32_e32 v32, s17 
-; SI-NEXT: v_mov_b32_e32 v29, s18 -; SI-NEXT: v_mov_b32_e32 v30, s19 -; SI-NEXT: v_mov_b32_e32 v27, s20 -; SI-NEXT: v_mov_b32_e32 v28, s21 -; SI-NEXT: v_mov_b32_e32 v25, s22 -; SI-NEXT: v_mov_b32_e32 v26, s23 -; SI-NEXT: v_mov_b32_e32 v23, s24 -; SI-NEXT: v_mov_b32_e32 v24, s25 -; SI-NEXT: v_mov_b32_e32 v21, s26 -; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v19, s28 -; SI-NEXT: v_mov_b32_e32 v20, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB73_4 +; SI-NEXT: v_writelane_b32 v63, s99, 35 +; SI-NEXT: v_readfirstlane_b32 s4, v1 +; SI-NEXT: v_readfirstlane_b32 s5, v2 +; SI-NEXT: v_readfirstlane_b32 s6, 
v3 +; SI-NEXT: v_readfirstlane_b32 s7, v4 +; SI-NEXT: v_readfirstlane_b32 s8, v5 +; SI-NEXT: v_readfirstlane_b32 s9, v6 +; SI-NEXT: v_readfirstlane_b32 s10, v7 +; SI-NEXT: v_readfirstlane_b32 s11, v8 +; SI-NEXT: v_readfirstlane_b32 s12, v9 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: v_readfirstlane_b32 s14, v11 +; SI-NEXT: v_readfirstlane_b32 s15, v12 +; SI-NEXT: v_readfirstlane_b32 s40, v13 +; SI-NEXT: v_readfirstlane_b32 s41, v14 +; SI-NEXT: v_readfirstlane_b32 s42, v15 +; SI-NEXT: v_readfirstlane_b32 s43, v16 +; SI-NEXT: v_readfirstlane_b32 s44, v17 +; SI-NEXT: s_and_b64 s[46:47], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s45, v18 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr61 : SGPR spill to VGPR lane +; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane +; SI-NEXT: s_cbranch_scc0 .LBB73_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 24 -; SI-NEXT: 
buffer_store_dword v33, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:108 ; 
4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_alignbit_b32 v33, v2, v1, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: s_lshr_b32 s46, s45, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 34 +; SI-NEXT: s_lshr_b32 s46, s45, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 35 +; SI-NEXT: s_lshr_b32 s46, s45, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 36 +; SI-NEXT: s_lshr_b32 s46, 
s43, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 37 +; SI-NEXT: s_lshr_b32 s46, s43, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 38 +; SI-NEXT: s_lshr_b32 s46, s43, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 39 +; SI-NEXT: s_lshr_b32 s46, s41, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 40 +; SI-NEXT: s_lshr_b32 s46, s41, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 41 +; SI-NEXT: s_lshr_b32 s46, s41, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 42 +; SI-NEXT: s_lshr_b32 s46, s15, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 43 +; SI-NEXT: s_lshr_b32 s46, s15, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 44 +; SI-NEXT: s_lshr_b32 s46, s15, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 45 +; SI-NEXT: s_lshr_b32 s46, s13, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 46 +; SI-NEXT: s_lshr_b32 s46, s13, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 47 +; SI-NEXT: s_lshr_b32 s46, s13, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 48 +; SI-NEXT: s_lshr_b32 s46, s11, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 49 +; SI-NEXT: s_lshr_b32 s46, s11, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 50 +; SI-NEXT: s_lshr_b32 s46, s11, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 51 +; SI-NEXT: s_lshr_b32 s46, s9, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 52 +; SI-NEXT: s_lshr_b32 s46, s9, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 53 +; SI-NEXT: s_lshr_b32 s46, s9, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 54 +; SI-NEXT: s_lshr_b32 s46, s7, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 55 +; SI-NEXT: s_lshr_b32 s46, s7, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 56 +; SI-NEXT: s_lshr_b32 s46, s7, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 57 +; SI-NEXT: s_lshr_b32 s46, s5, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 58 +; SI-NEXT: s_lshr_b32 s46, s5, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 59 +; SI-NEXT: s_lshr_b32 s46, s5, 8 +; SI-NEXT: v_writelane_b32 v61, s46, 60 +; SI-NEXT: s_lshr_b32 s46, s29, 24 +; SI-NEXT: v_writelane_b32 v61, s46, 61 +; SI-NEXT: s_lshr_b32 s46, s29, 16 +; SI-NEXT: v_writelane_b32 v61, s46, 62 +; SI-NEXT: s_lshr_b32 s46, s29, 8 +; 
SI-NEXT: v_writelane_b32 v61, s46, 63 +; SI-NEXT: s_lshr_b32 s46, s27, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 0 +; SI-NEXT: s_lshr_b32 s46, s27, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 1 +; SI-NEXT: s_lshr_b32 s46, s27, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 2 +; SI-NEXT: s_lshr_b32 s46, s25, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 3 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 4 +; SI-NEXT: s_lshr_b32 s46, s25, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 5 +; SI-NEXT: s_lshr_b32 s46, s23, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 6 +; SI-NEXT: s_lshr_b32 s46, s23, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 7 +; SI-NEXT: s_lshr_b32 s46, s23, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 8 +; SI-NEXT: s_lshr_b32 s46, s21, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 9 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 10 +; SI-NEXT: s_lshr_b32 s46, s21, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 11 +; SI-NEXT: s_lshr_b32 s46, s19, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 12 +; SI-NEXT: s_lshr_b32 s46, s19, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 13 +; SI-NEXT: s_lshr_b32 s46, s19, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 14 +; SI-NEXT: s_lshr_b32 s46, s17, 24 +; SI-NEXT: v_writelane_b32 v62, s46, 15 +; SI-NEXT: s_lshr_b32 s46, s17, 16 +; SI-NEXT: v_writelane_b32 v62, s46, 16 +; SI-NEXT: s_lshr_b32 s46, s17, 8 +; SI-NEXT: v_writelane_b32 v62, s46, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[44:45], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 32 +; SI-NEXT: v_writelane_b32 v61, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[44:45], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 30 +; SI-NEXT: v_writelane_b32 v61, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 28 +; SI-NEXT: v_writelane_b32 v61, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 26 +; SI-NEXT: v_writelane_b32 v61, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 8 +; SI-NEXT: v_writelane_b32 v61, 
s46, 24 +; SI-NEXT: v_writelane_b32 v61, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 22 +; SI-NEXT: v_writelane_b32 v61, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 20 +; SI-NEXT: v_writelane_b32 v61, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 18 +; SI-NEXT: v_writelane_b32 v61, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 16 +; SI-NEXT: v_writelane_b32 v61, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 14 +; SI-NEXT: v_writelane_b32 v61, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 12 +; SI-NEXT: v_writelane_b32 v61, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 10 +; SI-NEXT: v_writelane_b32 v61, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 8 +; SI-NEXT: v_writelane_b32 v61, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 6 +; SI-NEXT: v_writelane_b32 v61, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v61, s46, 4 +; SI-NEXT: v_writelane_b32 v61, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v61, s46, 2 +; SI-NEXT: v_writelane_b32 v61, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v61, s46, 0 +; SI-NEXT: s_lshr_b64 s[48:49], s[44:45], 16 +; SI-NEXT: v_writelane_b32 v61, s47, 1 +; SI-NEXT: s_lshr_b64 s[50:51], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[52:53], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[54:55], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[64:65], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[82:83], 
s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[84:85], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[86:87], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[98:99], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 +; SI-NEXT: s_cbranch_execnz .LBB73_4 +; SI-NEXT: .LBB73_2: ; %cmp.true +; SI-NEXT: v_add_f64 v[5:6], s[40:41], 1.0 +; SI-NEXT: v_add_f64 v[7:8], s[14:15], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v6 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v8 +; SI-NEXT: v_add_f64 v[9:10], s[12:13], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 8 -; SI-NEXT: buffer_store_dword v33, off, 
s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v10 +; SI-NEXT: v_add_f64 v[11:12], s[10:11], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v10 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v10 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v12 +; SI-NEXT: v_add_f64 v[13:14], s[8:9], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v12 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v12 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v14 +; SI-NEXT: v_add_f64 v[15:16], s[6:7], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v14 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v14 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v16 +; SI-NEXT: v_add_f64 v[17:18], s[4:5], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 
v25, 8, v16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v18 +; SI-NEXT: v_add_f64 v[19:20], s[28:29], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v18 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v18 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v20 +; SI-NEXT: v_add_f64 v[21:22], s[26:27], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v20 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v20 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v6 -; SI-NEXT: 
buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v22 +; SI-NEXT: v_add_f64 v[23:24], s[24:25], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v22 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v22 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v2 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v24 +; SI-NEXT: v_add_f64 v[38:39], s[22:23], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v2 -; SI-NEXT: v_alignbit_b32 v38, v28, v27, 24 -; SI-NEXT: v_alignbit_b32 v48, v28, v27, 16 -; SI-NEXT: v_alignbit_b32 v50, v28, v27, 8 -; SI-NEXT: v_alignbit_b32 v52, v30, v29, 24 -; SI-NEXT: 
v_alignbit_b32 v54, v30, v29, 16 -; SI-NEXT: v_alignbit_b32 v40, v30, v29, 8 -; SI-NEXT: v_alignbit_b32 v42, v32, v31, 24 -; SI-NEXT: v_alignbit_b32 v44, v32, v31, 16 -; SI-NEXT: v_alignbit_b32 v46, v32, v31, 8 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v37, 24, v20 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v22 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v55, 8, v22 -; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v24 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 -; SI-NEXT: v_lshrrev_b32_e32 v45, 8, v24 -; SI-NEXT: v_lshrrev_b32_e32 v47, 24, v26 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v26 -; SI-NEXT: v_lshrrev_b32_e32 v58, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v28 -; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v30 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v30 +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v39 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v32 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v32 -; SI-NEXT: v_lshrrev_b32_e32 v35, 8, v32 -; SI-NEXT: s_cbranch_execnz .LBB73_3 -; SI-NEXT: .LBB73_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v39 +; SI-NEXT: v_add_f64 v[52:53], s[20:21], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 
offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v39 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v53 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 24 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v53 +; SI-NEXT: v_add_f64 v[44:45], s[18:19], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v53 +; SI-NEXT: v_add_f64 v[1:2], s[44:45], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v45 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 24 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v45 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[1:2], 24 +; 
SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[1:2], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 24 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[1:2], 8 +; SI-NEXT: v_add_f64 v[3:4], s[42:43], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 24 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], 
v[3:4], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[5:6], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v10, v9, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[5:6], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 24 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[5:6], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[7:8], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v8, v7, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[7:8], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:228 ; 
4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 24 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[7:8], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 24 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v26, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v4, v3, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 24 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[13:14], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[13:14], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 24 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[13:14], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, 
off, s[0:3], s32 offset:324 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[15:16], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v20, v19, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[15:16], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 24 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[15:16], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[17:18], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v22, v21, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[17:18], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill ; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 24 -; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[25:26], v[17:18], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[19:20], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[19:20], 16 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[19:20], 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[21:22], 24 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[47:48], v[23:24], 16 +; SI-NEXT: v_add_f64 v[58:59], s[16:17], 1.0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[35:36], v[21:22], 8 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 8 +; SI-NEXT: v_lshrrev_b32_e32 v27, 24, v2 +; SI-NEXT: v_lshr_b64 v[36:37], v[23:24], 24 +; SI-NEXT: v_lshr_b64 v[49:50], v[38:39], 24 +; SI-NEXT: 
v_lshr_b64 v[40:41], v[38:39], 8 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[25:26], v[44:45], 8 +; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v2 +; SI-NEXT: v_mov_b32_e32 v37, v27 +; SI-NEXT: v_lshr_b64 v[50:51], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[41:42], v[52:53], 24 +; SI-NEXT: v_lshr_b64 v[54:55], v[52:53], 8 +; SI-NEXT: v_lshr_b64 v[26:27], v[58:59], 24 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v2 +; SI-NEXT: v_mov_b32_e32 v51, v28 +; SI-NEXT: v_lshr_b64 v[42:43], v[52:53], 16 +; SI-NEXT: v_lshr_b64 v[55:56], v[44:45], 24 +; SI-NEXT: v_lshr_b64 v[27:28], v[58:59], 16 +; SI-NEXT: v_mov_b32_e32 v43, v29 +; SI-NEXT: v_lshr_b64 v[56:57], v[44:45], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[58:59], 8 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v30, 8, v4 +; SI-NEXT: v_lshrrev_b32_e32 v34, 24, v6 +; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v45 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v59 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v59 +; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v59 +; SI-NEXT: v_lshrrev_b32_e32 v57, 24, v4 +; SI-NEXT: s_branch .LBB73_5 +; SI-NEXT: .LBB73_3: +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 0 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 1 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: 
$sgpr62 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 2 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 3 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 4 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 5 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 6 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 7 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 8 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 9 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 
v61, s48, 10 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 11 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 12 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 13 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 14 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 15 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 16 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 17 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 18 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 19 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 20 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 21 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 22 
+; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 23 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 24 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 25 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 26 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s49, 27 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 28 +; SI-NEXT: v_writelane_b32 v61, s49, 29 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 30 +; SI-NEXT: v_writelane_b32 v61, s49, 31 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: v_writelane_b32 v61, s48, 32 +; SI-NEXT: v_writelane_b32 v61, s49, 33 +; SI-NEXT: ; kill: killed $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: s_branch .LBB73_2 +; SI-NEXT: .LBB73_4: +; SI-NEXT: v_mov_b32_e32 v17, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 34 +; SI-NEXT: v_mov_b32_e32 v37, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 35 +; SI-NEXT: v_mov_b32_e32 v51, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 36 +; SI-NEXT: v_mov_b32_e32 v43, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 37 +; SI-NEXT: s_waitcnt 
expcnt(3) +; SI-NEXT: v_mov_b32_e32 v57, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 38 +; SI-NEXT: v_mov_b32_e32 v33, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 39 +; SI-NEXT: v_mov_b32_e32 v30, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 40 +; SI-NEXT: v_mov_b32_e32 v34, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 41 +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 42 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 43 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v24, v23, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 44 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 24 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 45 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 46 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v33, v26, v25, 8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 47 +; SI-NEXT: 
buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 48 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 49 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 50 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 51 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v16 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 52 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 53 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill ; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 54 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v14 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 55 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 56 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 57 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v12 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 58 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 59 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v10 -; SI-NEXT: 
buffer_store_dword v33, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 60 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v10 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 61 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 62 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 63 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v8 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 0 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 1 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: 
v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 2 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v6 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 3 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 4 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 5 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v4 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 6 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v2 -; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0 -; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 -; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 7 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 
offset:308 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 8 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v2 -; SI-NEXT: v_alignbit_b32 v38, v28, v27, 24 -; SI-NEXT: v_alignbit_b32 v48, v28, v27, 16 -; SI-NEXT: v_alignbit_b32 v50, v28, v27, 8 -; SI-NEXT: v_alignbit_b32 v52, v30, v29, 24 -; SI-NEXT: v_alignbit_b32 v54, v30, v29, 16 -; SI-NEXT: v_alignbit_b32 v40, v30, v29, 8 -; SI-NEXT: v_alignbit_b32 v42, v32, v31, 24 -; SI-NEXT: v_alignbit_b32 v44, v32, v31, 16 -; SI-NEXT: v_alignbit_b32 v46, v32, v31, 8 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18 -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v37, 24, v20 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v22 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v55, 8, v22 -; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v24 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 -; SI-NEXT: v_lshrrev_b32_e32 v45, 8, v24 -; SI-NEXT: v_lshrrev_b32_e32 v47, 24, v26 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v26 -; SI-NEXT: v_lshrrev_b32_e32 v58, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v28 -; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v30 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v30 +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 9 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v32 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v32 -; SI-NEXT: v_lshrrev_b32_e32 v35, 8, v32 -; SI-NEXT: .LBB73_3: ; %end -; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v46 -; SI-NEXT: v_and_b32_e32 v31, 0xff, v31 -; 
SI-NEXT: v_and_b32_e32 v44, 0xff, v44 -; SI-NEXT: v_or_b32_e32 v31, v31, v46 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v44 -; SI-NEXT: v_lshlrev_b32_e32 v42, 24, v42 -; SI-NEXT: v_or_b32_e32 v42, v42, v44 -; SI-NEXT: v_and_b32_e32 v31, 0xffff, v31 -; SI-NEXT: v_or_b32_e32 v31, v31, v42 -; SI-NEXT: buffer_store_dword v31, v0, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 10 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v31, 0xff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v35 -; SI-NEXT: v_or_b32_e32 v31, v31, v32 -; SI-NEXT: v_and_b32_e32 v32, 0xff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v33 -; SI-NEXT: v_or_b32_e32 v32, v33, v32 -; SI-NEXT: v_and_b32_e32 v31, 0xffff, v31 -; SI-NEXT: v_or_b32_e32 v31, v31, v32 -; SI-NEXT: v_add_i32_e32 v32, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v31, v32, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 11 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v31, 8, v40 -; SI-NEXT: v_and_b32_e32 v29, 0xff, v29 -; SI-NEXT: v_or_b32_e32 v29, v29, v31 -; SI-NEXT: v_and_b32_e32 v31, 0xff, v54 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v32, 24, v52 -; SI-NEXT: v_or_b32_e32 v31, v32, v31 -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: v_or_b32_e32 v29, v29, v31 -; SI-NEXT: v_add_i32_e32 v31, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v29, v31, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 12 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v30, 8, v63 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_and_b32_e32 
v30, 0xff, v62 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: v_lshlrev_b32_e32 v31, 24, v61 -; SI-NEXT: v_or_b32_e32 v30, v31, v30 -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 13 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v29, 8, v50 +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_mov_b32_e32 v29, s46 +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s98 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s96 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s86 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s84 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s82 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; 
SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s80 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s70 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s68 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s66 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s64 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s54 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s52 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s50 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; 
SI-NEXT: v_readlane_b32 s4, v62, 14 +; SI-NEXT: v_mov_b32_e32 v60, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 15 +; SI-NEXT: v_mov_b32_e32 v31, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 16 +; SI-NEXT: v_mov_b32_e32 v32, s4 +; SI-NEXT: v_readlane_b32 s4, v62, 17 +; SI-NEXT: v_mov_b32_e32 v18, s5 +; SI-NEXT: v_mov_b32_e32 v46, s4 +; SI-NEXT: v_readlane_b32 s4, v61, 0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 1 +; SI-NEXT: v_readlane_b32 s4, v61, 2 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 3 +; SI-NEXT: v_readlane_b32 s4, v61, 4 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 5 +; SI-NEXT: v_readlane_b32 s4, v61, 6 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 7 +; SI-NEXT: v_readlane_b32 s4, v61, 8 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 9 +; SI-NEXT: v_readlane_b32 s4, v61, 10 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; 
SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 11 +; SI-NEXT: v_readlane_b32 s4, v61, 12 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 13 +; SI-NEXT: v_readlane_b32 s4, v61, 14 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 15 +; SI-NEXT: v_readlane_b32 s4, v61, 16 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 17 +; SI-NEXT: v_readlane_b32 s4, v61, 18 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 19 +; SI-NEXT: v_readlane_b32 s4, v61, 20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 21 +; SI-NEXT: v_readlane_b32 s4, v61, 22 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 23 +; SI-NEXT: v_readlane_b32 s4, v61, 24 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 25 +; SI-NEXT: v_readlane_b32 s4, v61, 26 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 27 +; SI-NEXT: v_readlane_b32 s4, v61, 28 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 29 +; SI-NEXT: v_readlane_b32 s4, v61, 30 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s48 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_readlane_b32 s5, v61, 31 +; SI-NEXT: v_readlane_b32 s4, v61, 32 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v29, s4 +; SI-NEXT: v_mov_b32_e32 v59, s17 +; SI-NEXT: v_mov_b32_e32 v58, s16 +; SI-NEXT: v_mov_b32_e32 v45, s19 +; SI-NEXT: v_mov_b32_e32 v44, s18 +; SI-NEXT: v_mov_b32_e32 v53, s21 +; SI-NEXT: v_mov_b32_e32 v52, s20 +; SI-NEXT: v_mov_b32_e32 v39, s23 +; SI-NEXT: v_mov_b32_e32 v38, 
s22 +; SI-NEXT: v_mov_b32_e32 v24, s25 +; SI-NEXT: v_mov_b32_e32 v23, s24 +; SI-NEXT: v_mov_b32_e32 v22, s27 +; SI-NEXT: v_mov_b32_e32 v21, s26 +; SI-NEXT: v_mov_b32_e32 v20, s29 +; SI-NEXT: v_mov_b32_e32 v19, s28 +; SI-NEXT: v_mov_b32_e32 v16, s7 +; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v14, s9 +; SI-NEXT: v_mov_b32_e32 v13, s8 +; SI-NEXT: v_mov_b32_e32 v12, s11 +; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v10, s13 +; SI-NEXT: v_mov_b32_e32 v9, s12 +; SI-NEXT: v_mov_b32_e32 v8, s15 +; SI-NEXT: v_mov_b32_e32 v7, s14 +; SI-NEXT: v_mov_b32_e32 v6, s41 +; SI-NEXT: v_mov_b32_e32 v5, s40 +; SI-NEXT: v_mov_b32_e32 v4, s43 +; SI-NEXT: v_mov_b32_e32 v3, s42 +; SI-NEXT: v_mov_b32_e32 v2, s45 +; SI-NEXT: v_mov_b32_e32 v1, s44 +; SI-NEXT: v_mov_b32_e32 v28, s38 +; SI-NEXT: v_mov_b32_e32 v27, s36 +; SI-NEXT: v_mov_b32_e32 v26, s34 +; SI-NEXT: v_mov_b32_e32 v25, s30 +; SI-NEXT: v_mov_b32_e32 v56, s94 +; SI-NEXT: v_mov_b32_e32 v55, s92 +; SI-NEXT: v_mov_b32_e32 v54, s90 +; SI-NEXT: v_mov_b32_e32 v42, s88 +; SI-NEXT: v_mov_b32_e32 v41, s78 +; SI-NEXT: v_mov_b32_e32 v40, s76 +; SI-NEXT: v_mov_b32_e32 v50, s74 +; SI-NEXT: v_mov_b32_e32 v49, s72 +; SI-NEXT: v_mov_b32_e32 v48, s62 +; SI-NEXT: v_mov_b32_e32 v47, s60 +; SI-NEXT: v_mov_b32_e32 v36, s58 +; SI-NEXT: v_mov_b32_e32 v35, s56 +; SI-NEXT: v_readlane_b32 s5, v61, 33 +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: .LBB73_5: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v28 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_and_b32_e32 v29, 0xff, v58 ; SI-NEXT: v_and_b32_e32 v27, 0xff, v27 -; SI-NEXT: v_or_b32_e32 v27, v27, v29 -; SI-NEXT: v_and_b32_e32 v29, 0xff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 -; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v38 -; SI-NEXT: v_or_b32_e32 v29, v30, v29 -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_or_b32_e32 v27, v27, 
v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v27, v29, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v27, 0xff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v60 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: v_and_b32_e32 v28, 0xff, v59 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v58 ; SI-NEXT: v_or_b32_e32 v28, v29, v28 -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: v_add_i32_e32 v28, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v26 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_or_b32_e32 v26, v26, v27 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 +; SI-NEXT: buffer_store_dword v26, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v25, 0xff, v25 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v59 +; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v46 +; SI-NEXT: v_or_b32_e32 v26, v26, v27 +; SI-NEXT: v_and_b32_e32 v27, 0xff, v32 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v31 +; SI-NEXT: v_or_b32_e32 v27, v28, v27 +; SI-NEXT: v_and_b32_e32 v26, 0xffff, v26 +; SI-NEXT: v_or_b32_e32 v26, v26, v27 +; SI-NEXT: v_add_i32_e32 v27, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v26, 0xff, v44 +; SI-NEXT: v_or_b32_e32 v25, v26, v25 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v56 +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v55 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; 
SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v25, 0xff, v45 +; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v60 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: v_and_b32_e32 v23, 0xff, v23 ; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 ; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 +; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 +; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s99, v63, 35 +; SI-NEXT: v_readlane_b32 s98, v63, 34 +; SI-NEXT: v_readlane_b32 s97, v63, 33 +; SI-NEXT: v_readlane_b32 s96, v63, 32 +; SI-NEXT: v_readlane_b32 s87, v63, 31 +; SI-NEXT: v_readlane_b32 s86, v63, 30 +; SI-NEXT: v_readlane_b32 s85, v63, 29 +; SI-NEXT: v_readlane_b32 s84, v63, 28 +; SI-NEXT: v_readlane_b32 s83, v63, 27 +; SI-NEXT: v_readlane_b32 s82, v63, 26 +; SI-NEXT: v_readlane_b32 s81, v63, 25 +; SI-NEXT: v_readlane_b32 s80, v63, 24 +; SI-NEXT: v_readlane_b32 s71, v63, 23 +; SI-NEXT: v_readlane_b32 s70, v63, 22 +; SI-NEXT: v_readlane_b32 s69, v63, 21 +; SI-NEXT: v_readlane_b32 s68, v63, 20 +; SI-NEXT: v_readlane_b32 s67, v63, 19 +; SI-NEXT: v_readlane_b32 s66, v63, 18 +; SI-NEXT: v_readlane_b32 s65, v63, 17 +; SI-NEXT: v_readlane_b32 s64, v63, 16 +; SI-NEXT: v_readlane_b32 s55, v63, 15 +; SI-NEXT: v_readlane_b32 s54, v63, 14 +; SI-NEXT: v_readlane_b32 s53, v63, 13 +; SI-NEXT: v_readlane_b32 s52, v63, 12 +; SI-NEXT: v_readlane_b32 s51, v63, 11 +; SI-NEXT: v_readlane_b32 s50, v63, 10 +; SI-NEXT: v_readlane_b32 
s49, v63, 9 +; SI-NEXT: v_readlane_b32 s48, v63, 8 +; SI-NEXT: v_readlane_b32 s39, v63, 7 +; SI-NEXT: v_readlane_b32 s38, v63, 6 +; SI-NEXT: v_readlane_b32 s37, v63, 5 +; SI-NEXT: v_readlane_b32 s36, v63, 4 +; SI-NEXT: v_readlane_b32 s35, v63, 3 +; SI-NEXT: v_readlane_b32 s34, v63, 2 +; SI-NEXT: v_readlane_b32 s31, v63, 1 +; SI-NEXT: v_readlane_b32 s30, v63, 0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v27 -; SI-NEXT: v_or_b32_e32 v25, v25, v27 -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v26, 0xff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v54 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v52 +; SI-NEXT: v_or_b32_e32 v25, v26, v25 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v42 +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v41 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v25, 0xff, v53 +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v26 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 
; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v27, 0xff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 -; SI-NEXT: v_or_b32_e32 v27, v28, v27 -; SI-NEXT: v_or_b32_e32 v25, v25, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v25, v27, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v25, 0xff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v57 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 ; SI-NEXT: v_or_b32_e32 v25, v25, v26 -; SI-NEXT: v_and_b32_e32 v26, 0xff, v56 +; SI-NEXT: v_add_i32_e32 v26, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v40 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v38 +; SI-NEXT: v_or_b32_e32 v25, v26, v25 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v50 +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v49 ; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v47 ; SI-NEXT: v_or_b32_e32 v26, v27, v26 ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: v_or_b32_e32 v25, v25, v26 -; SI-NEXT: v_add_i32_e32 v26, vcc, 28, v0 +; SI-NEXT: v_add_i32_e32 v26, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v25, 0xff, v39 +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 -; SI-NEXT: v_or_b32_e32 v23, v23, v25 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v26 +; 
SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v26 +; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v25, 0xff, v25 +; SI-NEXT: v_and_b32_e32 v26, 0xff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; SI-NEXT: v_or_b32_e32 v26, v27, v26 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v48 +; SI-NEXT: v_or_b32_e32 v23, v23, v25 +; SI-NEXT: v_and_b32_e32 v25, 0xff, v47 +; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v36 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 ; SI-NEXT: v_or_b32_e32 v25, v26, v25 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_add_i32_e32 v25, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v45 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v24 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_and_b32_e32 v24, 0xff, v43 +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v25 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v24, 0xff, v24 ; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v41 ; SI-NEXT: v_or_b32_e32 v24, v25, v24 -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: 
v_add_i32_e32 v24, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v35 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v23, 0xff, v23 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v24 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xff, v23 ; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 ; SI-NEXT: v_or_b32_e32 v23, v24, v23 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 @@ -113423,28 +115385,37 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a, ; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v55 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: v_and_b32_e32 v22, 0xff, v53 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: 
s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 ; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v51 ; SI-NEXT: v_or_b32_e32 v22, v23, v22 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v22 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 ; SI-NEXT: v_or_b32_e32 v21, v22, v21 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 @@ -113452,518 +115423,370 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a, ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 
8, v49 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v39 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v21 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 ; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v37 ; SI-NEXT: v_or_b32_e32 v20, v21, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v19 -; SI-NEXT: v_or_b32_e32 v1, v1, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v17, v17, v19 +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 ; SI-NEXT: s_waitcnt 
vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v20 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 ; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 ; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_or_b32_e32 v1, v1, v19 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v19, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v17, 0xff, v18 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 +; SI-NEXT: v_or_b32_e32 v17, v17, v18 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v19, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v18, 0xff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_or_b32_e32 v18, v19, v18 +; SI-NEXT: v_or_b32_e32 v17, v17, v18 +; SI-NEXT: v_add_i32_e32 v18, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xff, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v17 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v18 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_or_b32_e32 v17, v18, v17 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 +; SI-NEXT: v_add_i32_e32 v17, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v4 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 
offset:276 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v15, 0xff, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v16, 8, v16 +; SI-NEXT: v_or_b32_e32 v15, v15, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; SI-NEXT: v_or_b32_e32 v16, v17, v16 +; SI-NEXT: v_or_b32_e32 v15, v15, v16 +; SI-NEXT: v_add_i32_e32 v16, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v5 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; 
SI-NEXT: v_or_b32_e32 v13, v13, v15 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_or_b32_e32 v15, v16, v15 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 +; SI-NEXT: v_add_i32_e32 v15, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v6 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xff, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; 
SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v15 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_or_b32_e32 v14, v15, v14 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v7 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; 
SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 +; SI-NEXT: v_add_i32_e32 v13, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v8 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v11, 0xff, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 +; SI-NEXT: v_or_b32_e32 v11, v11, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_or_b32_e32 
v12, v13, v12 +; SI-NEXT: v_or_b32_e32 v11, v11, v12 +; SI-NEXT: v_add_i32_e32 v12, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v9 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v12 +; SI-NEXT: v_or_b32_e32 v11, v12, v11 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 +; SI-NEXT: v_add_i32_e32 v11, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_and_b32_e32 v1, 0xff, v10 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xff, v10 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; SI-NEXT: v_or_b32_e32 v9, v9, v10 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_or_b32_e32 v10, v11, v10 +; SI-NEXT: v_or_b32_e32 v9, v9, v10 +; SI-NEXT: v_add_i32_e32 v10, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: 
v_or_b32_e32 v7, v7, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v7, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xff, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v8 +; SI-NEXT: v_or_b32_e32 v7, v7, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_or_b32_e32 v8, v9, v8 +; SI-NEXT: v_or_b32_e32 v7, v7, v8 +; SI-NEXT: v_add_i32_e32 v8, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v14 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt 
vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; SI-NEXT: v_or_b32_e32 v7, v8, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x68, v0 +; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v15 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:72 ; 4-byte 
Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v34 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 +; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; 
SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x70, v0 +; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v17 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v30 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v33 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v57 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 0x74, v0 +; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:144 ; 4-byte Folded 
Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v18 -; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v36 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v51 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v37 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:8 ; 4-byte 
Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, 
s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB73_4: -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed 
$vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; 
SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; 
implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; kill: killed $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: s_branch .LBB73_2 ; ; VI-LABEL: bitcast_v16f64_to_v128i8_scalar: ; VI: ; %bb.0: @@ -140462,219 +142285,224 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB85_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 16 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v18 ; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v34, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v35, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v36, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v38, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v39, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v49, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v51, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v40, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v42, v24, v23, 16 -; SI-NEXT: v_alignbit_b32 v45, v26, v25, 16 -; SI-NEXT: v_alignbit_b32 v47, v28, v27, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v30, v29, 16 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v60, v32, v31, 16 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v46, 
16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v28 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v16 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v10 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[33:34], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[39:40], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[40:41], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[41:42], v[27:28], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[42:43], v[29:30], 16 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, 
v28 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v32 +; SI-NEXT: v_lshr_b64 v[54:55], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[43:44], v[31:32], 16 ; SI-NEXT: s_cbranch_execnz .LBB85_3 ; SI-NEXT: .LBB85_2: ; %cmp.true ; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0 -; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 -; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 -; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 ; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_alignbit_b32 v33, v18, v17, 16 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v18 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 ; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v34, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v35, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v36, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v38, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v39, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v49, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v51, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v40, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v42, v24, v23, 16 -; SI-NEXT: v_alignbit_b32 v45, v26, v25, 16 -; SI-NEXT: v_alignbit_b32 v47, v28, v27, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v30, v29, 16 -; SI-NEXT: s_waitcnt 
expcnt(4) -; SI-NEXT: v_alignbit_b32 v60, v32, v31, 16 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v28 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v16 +; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v14 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v10 +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[33:34], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[13:14], 16 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 +; SI-NEXT: v_lshr_b64 v[36:37], v[11:12], 16 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 +; SI-NEXT: v_lshr_b64 v[37:38], v[9:10], 16 +; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 +; SI-NEXT: v_lshr_b64 v[38:39], v[7:8], 16 +; SI-NEXT: v_add_f64 v[27:28], v[27:28], 
1.0 +; SI-NEXT: v_lshr_b64 v[39:40], v[23:24], 16 +; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 +; SI-NEXT: v_lshr_b64 v[40:41], v[25:26], 16 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_add_f64 v[31:32], v[31:32], 1.0 +; SI-NEXT: v_lshr_b64 v[41:42], v[27:28], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[42:43], v[29:30], 16 +; SI-NEXT: v_lshr_b64 v[54:55], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[43:44], v[31:32], 16 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v32 ; SI-NEXT: .LBB85_3: ; %end -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v60 +; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v43 ; SI-NEXT: v_and_b32_e32 v31, 0xffff, v31 -; SI-NEXT: v_or_b32_e32 v31, v31, v60 +; SI-NEXT: v_or_b32_e32 v31, v31, v50 ; SI-NEXT: buffer_store_dword v31, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v31, 0xffff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v57 ; SI-NEXT: v_or_b32_e32 v31, v31, v32 ; SI-NEXT: v_add_i32_e32 v32, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v31, v32, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v58 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v42 ; SI-NEXT: v_and_b32_e32 v29, 
0xffff, v29 ; SI-NEXT: v_or_b32_e32 v29, v29, v31 ; SI-NEXT: v_add_i32_e32 v31, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v29, v31, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v29, 0xffff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v63 +; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v56 ; SI-NEXT: v_or_b32_e32 v29, v29, v30 ; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v47 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v41 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 ; SI-NEXT: v_or_b32_e32 v27, v27, v29 ; SI-NEXT: v_add_i32_e32 v29, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v27, v29, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v62 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v47 ; SI-NEXT: v_or_b32_e32 v27, v27, v28 ; SI-NEXT: v_add_i32_e32 v28, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v45 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v40 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: v_or_b32_e32 v25, v25, v27 ; SI-NEXT: v_add_i32_e32 v27, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v25, v27, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v61 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v46 ; SI-NEXT: v_or_b32_e32 v25, v25, v26 ; SI-NEXT: v_add_i32_e32 v26, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v39 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_add_i32_e32 v25, vcc, 32, v0 ; 
SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v59 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v45 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v40 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v57 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v63 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v49 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v56 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v62 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48 ; SI-NEXT: v_or_b32_e32 v1, v1, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v46 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v61 ; SI-NEXT: v_or_b32_e32 
v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v60 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v59 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -140686,7 +142514,7 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v58 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -140696,9 +142524,11 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 
offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -140708,9 +142538,11 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -140720,9 +142552,11 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -140732,25 +142566,27 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; 
SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v17 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload @@ -140771,39 +142607,43 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a, ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB85_4: -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; kill: killed $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; kill: killed $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; 
implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr60 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr58 ; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr56 ; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr55 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr53 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; kill: killed $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; kill: killed $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; kill: killed $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; kill: killed $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: s_branch .LBB85_2 ; ; VI-LABEL: bitcast_v16f64_to_v64i16_scalar: @@ -162237,22 +164077,24 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a ; SI-LABEL: bitcast_v64bf16_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 
offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; 
SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80 ; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 @@ -162265,596 +164107,622 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a ; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:36 ; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:32 ; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v42, off, 
s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:72 +; SI-NEXT: v_writelane_b32 v63, s30, 0 +; SI-NEXT: v_writelane_b32 v63, s31, 1 +; SI-NEXT: v_writelane_b32 v63, s34, 2 +; SI-NEXT: v_writelane_b32 v63, s35, 3 +; SI-NEXT: v_writelane_b32 v63, s36, 4 +; SI-NEXT: v_writelane_b32 v63, s37, 5 +; SI-NEXT: v_writelane_b32 v63, s38, 6 +; SI-NEXT: v_writelane_b32 v63, s39, 7 +; SI-NEXT: v_writelane_b32 v63, s48, 8 +; SI-NEXT: v_writelane_b32 v63, s49, 9 +; SI-NEXT: v_writelane_b32 v63, s50, 10 +; SI-NEXT: v_writelane_b32 v63, s51, 11 +; SI-NEXT: v_writelane_b32 v63, s52, 12 +; SI-NEXT: v_writelane_b32 v63, s53, 13 +; SI-NEXT: v_writelane_b32 v63, s54, 14 +; SI-NEXT: v_writelane_b32 v63, s55, 15 +; SI-NEXT: v_writelane_b32 v63, s64, 16 +; SI-NEXT: v_writelane_b32 v63, s65, 17 +; SI-NEXT: v_writelane_b32 v63, s66, 18 +; SI-NEXT: v_writelane_b32 v63, s67, 19 +; SI-NEXT: v_writelane_b32 v63, s68, 20 +; SI-NEXT: v_writelane_b32 v63, s69, 21 +; SI-NEXT: v_writelane_b32 v63, s70, 22 +; SI-NEXT: v_writelane_b32 v63, s71, 23 +; SI-NEXT: v_writelane_b32 v63, s80, 24 +; SI-NEXT: v_writelane_b32 v63, s81, 25 +; SI-NEXT: v_writelane_b32 v63, s82, 26 +; SI-NEXT: v_writelane_b32 v63, s83, 27 ; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:72 -; SI-NEXT: v_mov_b32_e32 v42, v22 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_mul_f32_e32 v60, 1.0, v1 -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v18 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v56, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v10 +; SI-NEXT: v_writelane_b32 v63, s84, 28 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v42 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 
offset:456 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v59, v26 -; SI-NEXT: v_mul_f32_e32 v32, 1.0, v2 -; SI-NEXT: v_mul_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_mul_f32_e32 v22, 1.0, v6 +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v9 +; SI-NEXT: v_writelane_b32 v63, s85, 29 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v14 +; SI-NEXT: v_writelane_b32 v63, s86, 30 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v15 +; SI-NEXT: v_writelane_b32 v63, s87, 31 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v18 +; SI-NEXT: v_writelane_b32 v63, s96, 32 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v26 +; SI-NEXT: v_writelane_b32 v63, s97, 33 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: v_writelane_b32 v63, s98, 34 +; SI-NEXT: v_mov_b32_e32 v46, v21 +; SI-NEXT: v_writelane_b32 v63, s99, 35 +; SI-NEXT: v_mul_f32_e32 v47, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v32, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v58, 1.0, v6 ; SI-NEXT: v_mul_f32_e32 v41, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v57, 1.0, v8 -; SI-NEXT: v_mul_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v56, 1.0, v10 -; SI-NEXT: v_mul_f32_e32 v26, 1.0, v9 -; SI-NEXT: v_mul_f32_e32 v61, 1.0, v12 -; SI-NEXT: v_mul_f32_e32 v63, 1.0, v11 -; SI-NEXT: v_mul_f32_e32 v11, 1.0, v14 -; SI-NEXT: v_mul_f32_e32 v52, 1.0, v13 -; SI-NEXT: v_mul_f32_e32 v40, 1.0, v15 -; SI-NEXT: v_mul_f32_e32 v18, 1.0, v17 -; SI-NEXT: v_mul_f32_e32 v17, 1.0, v20 -; SI-NEXT: v_mul_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_mul_f32_e32 v21, 1.0, v21 
-; SI-NEXT: v_mul_f32_e32 v20, 1.0, v24 -; SI-NEXT: v_mul_f32_e32 v42, 1.0, v23 -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v59 -; SI-NEXT: v_mul_f32_e32 v24, 1.0, v25 -; SI-NEXT: v_mul_f32_e32 v25, 1.0, v28 -; SI-NEXT: v_mul_f32_e32 v27, 1.0, v27 -; SI-NEXT: v_mul_f32_e32 v59, 1.0, v30 -; SI-NEXT: v_mul_f32_e32 v29, 1.0, v29 -; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v14, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v9, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v5, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v6, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v12, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v10, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v13, 1.0, s24 -; SI-NEXT: v_mul_f32_e64 v15, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v8, 1.0, s29 +; SI-NEXT: v_mul_f32_e32 v59, 1.0, v8 +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v5, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v60, 1.0, v11 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: v_mul_f32_e32 v31, 1.0, v16 -; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33 -; SI-NEXT: v_mul_f32_e32 v28, 1.0, v34 +; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_mul_f32_e32 v21, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_mul_f32_e32 v9, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v12, 1.0, v19 +; SI-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_mul_f32_e32 v20, 1.0, v46 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v36 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v48 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; SI-NEXT: v_mul_f32_e32 v46, 1.0, v23 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v2, 1.0, v52 +; SI-NEXT: v_mul_f32_e32 v26, 1.0, v25 +; SI-NEXT: v_mul_f32_e32 v57, 1.0, v28 +; SI-NEXT: v_mul_f32_e32 v16, 1.0, v27 +; SI-NEXT: v_mul_f32_e32 v28, 1.0, v30 +; SI-NEXT: v_mul_f32_e32 v30, 1.0, v29 +; SI-NEXT: v_mul_f32_e32 
v31, 1.0, v33 +; SI-NEXT: v_mul_f32_e32 v27, 1.0, v34 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v35 +; SI-NEXT: v_mul_f32_e32 v29, 1.0, v35 +; SI-NEXT: v_mul_f32_e32 v36, 1.0, v36 ; SI-NEXT: v_mul_f32_e32 v35, 1.0, v37 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v38 -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v39 -; SI-NEXT: v_mul_f32_e32 v37, 1.0, v48 +; SI-NEXT: v_mul_f32_e32 v34, 1.0, v38 +; SI-NEXT: v_mul_f32_e32 v37, 1.0, v39 +; SI-NEXT: v_mul_f32_e32 v48, 1.0, v49 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v50 +; SI-NEXT: v_mul_f32_e32 v33, 1.0, v51 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_mul_f32_e32 v33, 1.0, v49 +; SI-NEXT: v_mul_f32_e32 v51, 1.0, v53 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v54 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v50, 1.0, v50 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v55 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_mul_f32_e32 v48, 1.0, v51 +; SI-NEXT: v_mul_f32_e32 v38, 1.0, v40 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v53 -; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v44 -; SI-NEXT: v_mul_f32_e32 v36, 1.0, v54 -; SI-NEXT: v_mul_f32_e32 v55, 1.0, v55 -; SI-NEXT: v_mul_f32_e32 v62, 1.0, v43 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_mul_f32_e32 v44, 1.0, v45 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_mul_f32_e32 v46, 1.0, v46 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v47 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_mul_f32_e32 v43, 1.0, v58 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e64 v1, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v49, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v54, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v16, 1.0, s28 -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 
offset:488 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:580 ; 4-byte Folded 
Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v35, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v42 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_mul_f32_e32 v54, 1.0, v43 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v44 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_mul_f32_e32 v53, 1.0, v45 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e64 v2, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v11, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v3, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v4, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v14, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v15, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v7, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v6, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v18, 1.0, s25 +; SI-NEXT: v_mul_f32_e64 v19, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v10, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v23, 1.0, s29 +; SI-NEXT: v_mul_f32_e64 v25, 1.0, s28 +; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:388 ; 4-byte 
Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v26, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, 
off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB91_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v2, v1, v2, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 -; SI-NEXT: v_alignbit_b32 v5, v1, v6, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v10 -; SI-NEXT: v_alignbit_b32 v6, v1, v13, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8 -; SI-NEXT: v_alignbit_b32 v10, v1, v16, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v4 -; SI-NEXT: v_alignbit_b32 v4, v1, v3, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v57 -; SI-NEXT: v_alignbit_b32 v3, v1, v7, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v61 -; SI-NEXT: v_alignbit_b32 v16, v1, v63, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v31 -; SI-NEXT: v_alignbit_b32 v13, v1, v40, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v17 -; SI-NEXT: v_alignbit_b32 v61, v1, v19, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v20 -; SI-NEXT: v_alignbit_b32 v7, v1, v42, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v25 -; SI-NEXT: v_alignbit_b32 v63, v1, v27, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v30 -; SI-NEXT: v_alignbit_b32 v45, v1, v28, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v35 -; SI-NEXT: v_alignbit_b32 v8, v1, v38, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v33 -; SI-NEXT: v_alignbit_b32 v33, v1, v50, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v36 -; SI-NEXT: v_mov_b32_e32 v57, v62 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v14 -; SI-NEXT: v_alignbit_b32 v28, v1, v55, 16 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v44 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_alignbit_b32 v44, v62, v9, 16 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v56 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v55, v9, v26, 16 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v9, 
16, v11 -; SI-NEXT: v_alignbit_b32 v38, v9, v52, 16 -; SI-NEXT: v_mov_b32_e32 v52, v59 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v49 -; SI-NEXT: v_alignbit_b32 v1, v1, v46, 16 -; SI-NEXT: v_alignbit_b32 v46, v47, v12, 16 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v36, v8 -; SI-NEXT: v_alignbit_b32 v8, v44, v2, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v44, v2, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v44, v2, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v46, v5, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v46, v5, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v46, v5, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v54 -; SI-NEXT: v_alignbit_b32 v19, v8, v15, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v19, v6, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v19, v6, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, 
v11 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v19, v6, 8 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v38 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v32 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v50, v8, v60, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v50, v10, 24 -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v22 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v50, v10, 8 -; SI-NEXT: v_alignbit_b32 v25, v60, v41, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v25, v4, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v25, v4, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v25, v4, 8 -; SI-NEXT: v_mov_b32_e32 v17, v32 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 
v8, v55, v3, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v38, v16, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v38, v16, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v38, v16, 8 -; SI-NEXT: v_alignbit_b32 v20, v50, v10, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v14 -; SI-NEXT: s_mov_b64 s[4:5], 0 -; SI-NEXT: v_mov_b32_e32 v58, v41 -; SI-NEXT: v_mov_b32_e32 v40, v26 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v44 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v59 -; SI-NEXT: v_alignbit_b32 v35, v9, v18, 16 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v35, v13, 24 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v35, v13, 16 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v42 -; SI-NEXT: v_alignbit_b32 v30, v9, v21, 16 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v30, v61, 24 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v30, v61, 16 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v30, v61, 8 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v23 -; SI-NEXT: v_alignbit_b32 v27, v9, v24, 16 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v27, v7, 24 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v27, v7, 16 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v27, v7, 8 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v52 -; SI-NEXT: v_alignbit_b32 v24, v9, v29, 16 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v3 +; SI-NEXT: v_mov_b32_e32 v42, v37 +; SI-NEXT: v_alignbit_b32 v37, v2, v11, 16 +; SI-NEXT: v_alignbit_b32 v11, v44, v4, 16 +; SI-NEXT: v_readfirstlane_b32 s4, v37 +; SI-NEXT: v_readfirstlane_b32 s5, v11 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v14 +; SI-NEXT: v_writelane_b32 v62, s6, 0 +; SI-NEXT: v_alignbit_b32 v2, v2, v15, 16 +; SI-NEXT: v_writelane_b32 v62, s7, 1 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v7 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v24, v63, 16 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v18 +; SI-NEXT: 
buffer_load_dword v18, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v14, v52, v6, 16 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: v_alignbit_b32 v2, v2, v19, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[16:17], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v24, v63, 8 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v10 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v19, v2, v8, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v34 -; SI-NEXT: v_alignbit_b32 v21, v9, v12, 16 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v23 +; SI-NEXT: v_readfirstlane_b32 s5, v19 +; SI-NEXT: v_alignbit_b32 v2, v2, v25, 16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v56 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[18:19], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[22:23], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: v_alignbit_b32 v47, v45, v47, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v21, v45, 24 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_readfirstlane_b32 s5, v47 +; SI-NEXT: v_alignbit_b32 v2, v2, v1, 16 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v58 +; SI-NEXT: s_lshr_b64 s[20:21], s[4:5], 24 +; 
SI-NEXT: s_lshr_b64 s[24:25], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: v_mov_b32_e32 v4, v58 +; SI-NEXT: v_alignbit_b32 v58, v8, v41, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v21, v45, 8 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v59 +; SI-NEXT: v_readfirstlane_b32 s5, v58 +; SI-NEXT: v_alignbit_b32 v2, v2, v61, 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v39 -; SI-NEXT: v_alignbit_b32 v18, v9, v37, 16 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v5 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v2, v2, v60, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v23, v22 +; SI-NEXT: v_mov_b32_e32 v40, v36 +; SI-NEXT: s_mov_b64 vcc, 0 +; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v56 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v18 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_alignbit_b32 v41, v15, v6, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v41 +; SI-NEXT: s_lshr_b64 s[42:43], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[4:5], 8 +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v18, v36, 24 -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v59 -; SI-NEXT: 
buffer_store_dword v9, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v21 +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v32 +; SI-NEXT: v_alignbit_b32 v59, v1, v13, 16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s5, v59 +; SI-NEXT: s_waitcnt vmcnt(3) expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 +; SI-NEXT: s_lshr_b64 s[56:57], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[4:5], 8 +; SI-NEXT: v_alignbit_b32 v61, v1, v17, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v61 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v58 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_alignbit_b32 v2, v2, v21, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v18, v36, 16 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v9 +; SI-NEXT: v_alignbit_b32 v2, v2, v12, 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v35 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v22 +; SI-NEXT: v_alignbit_b32 v60, v2, v20, 16 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v18, v36, 8 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v24 +; SI-NEXT: v_alignbit_b32 v1, v2, v46, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v60 +; SI-NEXT: s_lshr_b64 s[76:77], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[4:5], 8 +; SI-NEXT: v_readfirstlane_b32 s4, v1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v42 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v1, v5 +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v48 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v23 +; SI-NEXT: v_mov_b32_e32 v5, v28 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v10 +; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v47 +; SI-NEXT: v_lshrrev_b32_e32 v12, 8, v41 +; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v61 +; SI-NEXT: v_lshrrev_b32_e32 v23, 8, v60 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v20 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v25, v2, v26, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v30 -; SI-NEXT: v_alignbit_b32 v15, v9, v51, 16 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v57 +; SI-NEXT: v_readfirstlane_b32 s5, v25 +; SI-NEXT: v_alignbit_b32 v2, v2, v16, 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[4:5], 16 +; 
SI-NEXT: s_lshr_b64 s[34:35], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v23 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v28 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v22, v2, v30, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v31 +; SI-NEXT: v_readfirstlane_b32 s5, v22 +; SI-NEXT: v_alignbit_b32 v2, v2, v27, 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v15, v33, 24 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v29 +; SI-NEXT: v_alignbit_b32 v17, v2, v36, 16 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v27 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v35 +; SI-NEXT: v_readfirstlane_b32 s5, v17 +; SI-NEXT: v_alignbit_b32 v2, v2, v34, 16 +; SI-NEXT: s_lshr_b64 s[48:49], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[50:51], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[52:53], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v15, v33, 16 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 
offset:392 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v42 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v20 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v52 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v5 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v15, v33, 8 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v22 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v24 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v29 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v29, v37 +; SI-NEXT: v_mov_b32_e32 v37, v42 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v57 -; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v17 -; SI-NEXT: v_lshrrev_b32_e32 v17, 8, v50 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v17 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v34 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v37 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v17, 24, v22 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 
offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v33 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v34, 24, v4 +; SI-NEXT: v_lshrrev_b32_e32 v16, 24, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 8, v11 +; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v7 +; SI-NEXT: v_lshrrev_b32_e32 v7, 8, v14 +; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v19 +; SI-NEXT: v_lshrrev_b32_e32 v4, 8, v59 +; SI-NEXT: v_lshrrev_b32_e32 v35, 24, v43 +; SI-NEXT: v_mov_b32_e32 v31, v20 +; SI-NEXT: v_mov_b32_e32 v20, v34 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_alignbit_b32 v30, v2, v36, 16 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_alignbit_b32 v2, v2, v39, 16 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s5, v30 +; SI-NEXT: s_lshr_b64 s[54:55], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[64:65], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v21 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v33 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v28, v36 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_alignbit_b32 v57, v2, v39, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v17, 8, v25 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v51 +; SI-NEXT: v_readfirstlane_b32 s5, v57 +; SI-NEXT: v_alignbit_b32 v2, v2, v50, 16 +; SI-NEXT: s_lshr_b64 s[66:67], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[70:71], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[4:5], 
8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v39 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v49 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v46, v2, v38, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v17, 24, v56 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v55 +; SI-NEXT: v_readfirstlane_b32 s5, v46 +; SI-NEXT: v_alignbit_b32 v2, v2, v54, 16 +; SI-NEXT: s_lshr_b64 s[80:81], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[84:85], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[4:5], 8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v18 -; SI-NEXT: v_lshrrev_b32_e32 v37, 24, v49 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v43 +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v57 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v38, v2, v53, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v17, 8, v55 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 24, v18 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v15 -; SI-NEXT: v_alignbit_b32 v8, v35, v13, 8 -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v49 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 
offset:308 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v37, 24, v54 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v2, v32 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v24, v63, 24 -; SI-NEXT: v_alignbit_b32 v31, v21, v45, 16 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v46 -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v19 -; SI-NEXT: v_mov_b32_e32 v39, v20 -; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v48 -; SI-NEXT: v_lshrrev_b32_e32 v57, 24, v57 -; SI-NEXT: v_mov_b32_e32 v52, v42 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_alignbit_b32 v12, v9, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v28, 8 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v46 +; SI-NEXT: v_readfirstlane_b32 s5, v38 +; SI-NEXT: v_lshrrev_b32_e32 v2, 24, v2 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v53 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v9, v9, v43, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v17, 8, v9 -; SI-NEXT: v_alignbit_b32 v41, v12, v28, 24 -; SI-NEXT: v_alignbit_b32 v32, v12, v28, 16 -; SI-NEXT: v_alignbit_b32 v29, v9, v1, 24 -; SI-NEXT: v_alignbit_b32 v43, v9, v1, 16 -; SI-NEXT: v_alignbit_b32 v26, v9, v1, 8 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, 
v38 +; SI-NEXT: s_lshr_b64 s[86:87], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[98:99], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[4:5], 8 +; SI-NEXT: v_mov_b32_e32 v32, v8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v18, 8, v25 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v53 -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 8, v30 +; SI-NEXT: v_mov_b32_e32 v55, v49 +; SI-NEXT: v_mov_b32_e32 v49, v15 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v8, v6 ; SI-NEXT: s_branch .LBB91_3 ; SI-NEXT: .LBB91_2: -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: 
killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; 
kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_mov_b32_e32 v55, v49 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: v_writelane_b32 v62, s4, 0 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: v_mov_b32_e32 v40, v36 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: s_mov_b64 vcc, -1 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: v_writelane_b32 v62, s5, 1 +; SI-NEXT: ; 
implicit-def: $vgpr8 +; SI-NEXT: ; kill: killed $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $vgpr44 +; SI-NEXT: ; implicit-def: $vgpr16 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr18 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr27 +; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; 
implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $vgpr60 +; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; kill: killed $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 @@ -162872,1143 +164740,1007 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a ; SI-NEXT: ; implicit-def: $vgpr1 ; 
SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: v_mov_b32_e32 v40, v26 -; SI-NEXT: v_mov_b32_e32 v58, v41 -; SI-NEXT: s_mov_b64 s[4:5], -1 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; 
implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr57 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; kill: killed $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; kill: killed $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: .LBB91_3: ; %Flow -; SI-NEXT: v_mov_b32_e32 v56, v40 -; SI-NEXT: v_mov_b32_e32 v20, v11 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v40, v8 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v54, off, s[0:3], 
s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: s_andn2_b64 vcc, exec, vcc ; SI-NEXT: s_cbranch_vccnz .LBB91_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_and_b32_e32 v1, 
0xffff0000, v1 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v43 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 -; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 -; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v25 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 -; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_and_b32_e32 v40, 0xffff0000, v40 -; SI-NEXT: v_add_f32_e32 v40, 0x40c00000, v40 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v43, 0xffff0000, v43 -; SI-NEXT: v_add_f32_e32 v43, 0x40c00000, v43 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v43 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 -; SI-NEXT: v_add_f32_e32 v36, 0x40c00000, v36 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v36 -; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: 
v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_alignbit_b32 v29, v3, v2, 16 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_alignbit_b32 v50, v4, v3, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_alignbit_b32 v54, v5, v4, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v7, v3, v2, 16 +; SI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s4, v7 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v1 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_alignbit_b32 v45, v7, v5, 16 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v11 -; SI-NEXT: v_alignbit_b32 v9, v7, v6, 16 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v6 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v6 -; SI-NEXT: v_lshrrev_b32_e32 v57, 24, v14 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v37 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_alignbit_b32 v10, v6, v4, 16 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v33 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: 
v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s52, v10 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v51 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 ; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_alignbit_b32 v63, v10, v6, 16 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v14 -; SI-NEXT: v_alignbit_b32 v12, v10, v7, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v48 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v7 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x40c00000, v48 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v48 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_alignbit_b32 v7, v13, v7, 16 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v17 -; SI-NEXT: v_alignbit_b32 v15, v13, v10, 16 -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte 
Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v2 -; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v10 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v8 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v8 -; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 -; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_alignbit_b32 v61, v16, v10, 16 -; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v20 -; SI-NEXT: v_alignbit_b32 v18, v16, v13, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v13 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 -; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 -; SI-NEXT: v_alignbit_b32 v13, v19, v13, 16 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v8 -; SI-NEXT: v_alignbit_b32 v21, v19, v16, 16 -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v2, 
0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_alignbit_b32 v9, v3, v2, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s86, v9 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v20 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v20, 24, v11 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v16 -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 -; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v16 -; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 -; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 -; SI-NEXT: v_alignbit_b32 v16, v22, v16, 16 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v23 -; SI-NEXT: v_alignbit_b32 v24, v22, v19, 16 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v23 -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v17 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v50 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 -; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; 
SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 -; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 -; SI-NEXT: v_alignbit_b32 v3, v25, v19, 16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v31 -; SI-NEXT: v_alignbit_b32 v27, v25, v22, 16 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v22 -; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v52 -; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v22 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v32 -; SI-NEXT: v_alignbit_b32 v30, v26, v25, 16 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_alignbit_b32 v12, v3, v2, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s80, v12 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload ; 
SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 -; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 -; SI-NEXT: v_alignbit_b32 v4, v28, v22, 16 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 -; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v25 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v59 -; SI-NEXT: v_add_f32_e32 v39, 0x40c00000, v25 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v39 -; SI-NEXT: v_alignbit_b32 v35, v34, v28, 16 -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v26 -; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v33 -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v43 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v43, v9, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 -; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 -; SI-NEXT: v_alignbit_b32 v10, v33, v25, 16 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 -; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v28 -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 -; SI-NEXT: v_add_f32_e32 v49, 0x40c00000, v28 -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v49 -; SI-NEXT: v_alignbit_b32 v38, v37, v33, 16 -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v56 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 -; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 -; SI-NEXT: v_alignbit_b32 v6, v36, v28, 16 -; SI-NEXT: v_add_f32_e32 v36, 0x40c00000, v33 -; SI-NEXT: 
buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v28, v29 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 -; SI-NEXT: v_add_f32_e32 v51, 0x40c00000, v33 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v51 -; SI-NEXT: v_alignbit_b32 v55, v53, v36, 16 -; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v58 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 -; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33 -; SI-NEXT: v_alignbit_b32 v5, v48, v33, 16 -; SI-NEXT: v_add_f32_e32 v48, 0x40c00000, v36 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v33, v50 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 -; SI-NEXT: v_add_f32_e32 v52, 0x40c00000, v36 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v52 -; SI-NEXT: v_alignbit_b32 v25, v60, v48, 16 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_alignbit_b32 v13, v3, v2, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s66, v13 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v50, 0xffff0000, v50 -; SI-NEXT: v_add_f32_e32 v50, 0x40c00000, v50 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v50 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 -; SI-NEXT: v_add_f32_e32 v36, 0x40c00000, v36 -; SI-NEXT: 
v_alignbit_b32 v2, v50, v36, 16 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v36, v54 +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x40c00000, v48 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v50, 0xffff0000, v50 -; SI-NEXT: v_add_f32_e32 v54, 0x40c00000, v50 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v54 -; SI-NEXT: v_alignbit_b32 v50, v19, v48, 16 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v40 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v29, 24, v54 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x40c00000, v48 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v19, v19, v48, 16 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v19 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 -; SI-NEXT: v_add_f32_e32 v41, 0x40c00000, v48 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 -; SI-NEXT: v_add_f32_e32 v42, 0x40c00000, v48 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v42 -; SI-NEXT: v_alignbit_b32 v46, v47, v41, 16 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:500 ; 
4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v42 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v40 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v52 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v38, v4, v3, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v51 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v55 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v15 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v15, v7, v6, 16 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v42 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v46, v6, v3, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v49 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v48 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v39 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v23, v7, v6, 16 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v5 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v53 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: 
v_alignbit_b32 v57, v6, v3, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v39 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v54 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v28 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v16, v7, v6, 16 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v30, v6, v3, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v32 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v21 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v40 +; SI-NEXT: v_alignbit_b32 v18, v9, v7, 16 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v34 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_alignbit_b32 v20, v10, v9, 16 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s38, v15 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s90, v16 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v31 -; SI-NEXT: v_mov_b32_e32 v51, v29 -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v39, v50, v10, 16 -; SI-NEXT: 
v_alignbit_b32 v42, v25, v4, 8 -; SI-NEXT: v_alignbit_b32 v40, v24, v63, 24 -; SI-NEXT: v_alignbit_b32 v31, v21, v45, 16 -; SI-NEXT: v_alignbit_b32 v32, v12, v28, 16 -; SI-NEXT: v_alignbit_b32 v29, v9, v1, 24 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s30, v23 +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s76, v18 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s62, v20 +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v26, v9, v1, 8 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v46 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_and_b32_e32 v41, 0xffff0000, v41 -; SI-NEXT: v_add_f32_e32 v41, 0x40c00000, v41 -; SI-NEXT: v_alignbit_b32 v44, v62, v41, 16 -; SI-NEXT: v_alignbit_b32 v8, v44, v2, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v5 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v46, v5, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v46, v5, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v46, v5, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v19, v6, 24 -; SI-NEXT: 
buffer_store_dword v8, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v19, v6, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v19, v6, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v50, v10, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v50, v10, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v25, v4, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v25, v4, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v55, v3, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v38, v16, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v38, v16, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v38, v16, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v35, v13, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v35, v13, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v35, v13, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v30, v61, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v30, v61, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v30, v61, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v27, v7, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v27, v7, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v27, v7, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v24, v63, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v24, v63, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v21, v45, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v21, v45, 8 -; 
SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v18, v36, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v18, v36, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v18, v36, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v15, v33, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v15, v33, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v15, v33, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v12, v28, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v50 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v25 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v55 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v38 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v35 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v30 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 24, v4 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s5, v38 +; SI-NEXT: v_readfirstlane_b32 s87, v46 +; SI-NEXT: v_readfirstlane_b32 s81, v57 +; SI-NEXT: v_readfirstlane_b32 s67, v30 +; SI-NEXT: s_lshr_b64 s[54:55], s[66:67], 24 +; SI-NEXT: s_lshr_b64 s[64:65], s[66:67], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[66:67], 8 +; SI-NEXT: s_lshr_b64 s[66:67], s[80:81], 24 +; SI-NEXT: s_lshr_b64 s[70:71], s[80:81], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[80:81], 8 +; SI-NEXT: s_lshr_b64 s[80:81], s[86:87], 24 +; SI-NEXT: s_lshr_b64 s[84:85], s[86:87], 16 +; SI-NEXT: s_lshr_b64 s[96:97], s[86:87], 8 +; SI-NEXT: s_lshr_b64 s[86:87], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[98:99], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[4:5], 8 +; SI-NEXT: v_lshrrev_b32_e32 v35, 24, v2 +; SI-NEXT: v_lshrrev_b32_e32 v2, 8, v30 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v17, v7, v3, 16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v27 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s53, v17 +; SI-NEXT: s_lshr_b64 s[48:49], s[52:53], 24 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: 
v_and_b32_e32 v10, 0xffff0000, v10 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_alignbit_b32 v21, v12, v10, 16 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16 +; SI-NEXT: v_readfirstlane_b32 s56, v21 +; SI-NEXT: s_lshr_b64 s[50:51], s[52:53], 16 +; SI-NEXT: s_lshr_b64 s[52:53], s[52:53], 8 +; SI-NEXT: v_lshrrev_b32_e32 v6, 24, v6 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_alignbit_b32 v22, v9, v3, 16 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; SI-NEXT: 
v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_alignbit_b32 v23, v13, v12, 16 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v31 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v9 +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s42, v23 +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v22 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v21 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v17 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v18 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v57 +; SI-NEXT: v_readfirstlane_b32 s39, v22 +; SI-NEXT: v_lshrrev_b32_e32 v9, 24, v9 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v7 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v15 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v46 +; SI-NEXT: s_lshr_b64 s[36:37], s[38:39], 16 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v12 -; SI-NEXT: v_alignbit_b32 v17, v44, v2, 24 -; SI-NEXT: v_alignbit_b32 v11, v44, v2, 8 -; SI-NEXT: v_alignbit_b32 v41, v12, v28, 24 -; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v44 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v38 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v21 +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_alignbit_b32 v24, v15, v13, 16 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v25, v10, v3, 16 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s31, v25 +; SI-NEXT: v_readfirstlane_b32 s26, v24 +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: s_lshr_b64 s[94:95], s[30:31], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[30:31], 8 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v23 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; SI-NEXT: v_alignbit_b32 v26, v16, v15, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 
offset:436 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_readfirstlane_b32 s20, v26 +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v9 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: .LBB91_5: ; %end -; SI-NEXT: s_waitcnt vmcnt(4) expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v2 +; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v21 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v17 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, v17, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v14 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v44 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v62 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v14 -; SI-NEXT: v_or_b32_e32 v11, v17, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v5 -; SI-NEXT: buffer_load_dword v14, off, 
s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_alignbit_b32 v27, v18, v16, 16 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v10 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v60, v12, v3, 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s91, v60 +; SI-NEXT: v_readfirstlane_b32 s14, v27 +; SI-NEXT: v_lshrrev_b32_e32 v10, 24, v10 +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: s_lshr_b64 s[88:89], s[90:91], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[90:91], 8 +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; SI-NEXT: v_alignbit_b32 v29, v20, v18, 16 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s8, v29 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v12 +; SI-NEXT: v_alignbit_b32 v61, v11, v3, 16 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:240 ; 
4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s77, v61 +; SI-NEXT: v_lshrrev_b32_e32 v12, 24, v12 +; SI-NEXT: s_lshr_b64 s[74:75], s[76:77], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[76:77], 8 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v61 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v13 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v13, 24, v13 +; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v18 +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v59, v36, v3, 16 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v8 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s63, v59 +; SI-NEXT: s_lshr_b64 s[60:61], s[62:63], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[62:63], 8 +; SI-NEXT: v_lshrrev_b32_e32 v4, 8, v59 +; SI-NEXT: v_lshrrev_b32_e32 v18, 8, v25 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v20 +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 
offset:376 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v3 +; SI-NEXT: v_alignbit_b32 v41, v49, v15, 16 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s57, v41 +; SI-NEXT: v_lshrrev_b32_e32 v3, 24, v3 +; SI-NEXT: s_lshr_b64 s[46:47], s[56:57], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[56:57], 8 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v12, 8, v41 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v14 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v15 +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, v17, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v46 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v49 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v15 +; SI-NEXT: v_alignbit_b32 v58, v32, v16, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s43, v58 +; SI-NEXT: s_lshr_b64 s[40:41], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[42:43], 8 +; SI-NEXT: 
v_lshrrev_b32_e32 v20, 24, v15 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v58 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v63 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_alignbit_b32 v47, v45, v16, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s27, v47 +; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v47 +; SI-NEXT: s_lshr_b64 s[24:25], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[26:27], 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v54 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 
-; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_alignbit_b32 v19, v11, v16, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s21, v19 +; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v19 +; SI-NEXT: s_lshr_b64 s[18:19], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[22:23], s[20:21], 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v10 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v39 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_alignbit_b32 v14, v52, v16, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s15, v14 +; SI-NEXT: v_lshrrev_b32_e32 v7, 8, v14 +; SI-NEXT: s_lshr_b64 s[12:13], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_alignbit_b32 v11, v44, v16, 16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 24, v23 +; SI-NEXT: v_lshrrev_b32_e32 v23, 8, v60 +; SI-NEXT: v_readfirstlane_b32 s9, v11 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v3, 8, v11 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[10:11], s[8:9], 8 +; SI-NEXT: v_writelane_b32 v62, s6, 0 +; SI-NEXT: v_writelane_b32 v62, s7, 1 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[20:21], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[56:57], 24 +; SI-NEXT: s_lshr_b64 s[56:57], s[62:63], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[76:77], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[90:91], 24 +; SI-NEXT: s_lshr_b64 s[90:91], s[30:31], 24 +; SI-NEXT: s_lshr_b64 s[30:31], s[38:39], 24 +; SI-NEXT: s_lshr_b64 s[38:39], s[38:39], 8 +; SI-NEXT: .LBB91_5: ; %end +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v29 +; SI-NEXT: s_lshl_b32 s5, s10, 8 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s6, 0xff +; SI-NEXT: v_readlane_b32 s6, v62, 0 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 24 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v51 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v5 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, v14, 
v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v44 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v16 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v42 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v45 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v36 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s16, 8 +; SI-NEXT: s_lshl_b32 s6, s8, 24 +; SI-NEXT: v_add_i32_e32 v5, vcc, 8, v0 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v26 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: s_lshl_b32 s4, s4, 8 +; SI-NEXT: v_readlane_b32 s7, v62, 1 +; SI-NEXT: v_readlane_b32 s99, v63, 35 +; SI-NEXT: v_readlane_b32 s97, v63, 33 +; SI-NEXT: v_readlane_b32 s87, v63, 31 +; SI-NEXT: v_readlane_b32 s85, v63, 29 +; SI-NEXT: v_readlane_b32 s83, v63, 27 +; SI-NEXT: v_readlane_b32 s81, v63, 25 +; SI-NEXT: v_readlane_b32 s71, v63, 23 +; SI-NEXT: v_readlane_b32 s69, v63, 21 +; SI-NEXT: v_readlane_b32 s67, v63, 19 +; SI-NEXT: v_readlane_b32 s65, v63, 17 +; SI-NEXT: v_readlane_b32 s55, v63, 
15 +; SI-NEXT: v_readlane_b32 s53, v63, 13 +; SI-NEXT: v_readlane_b32 s51, v63, 11 +; SI-NEXT: v_readlane_b32 s49, v63, 9 +; SI-NEXT: v_readlane_b32 s39, v63, 7 +; SI-NEXT: v_readlane_b32 s37, v63, 5 +; SI-NEXT: v_readlane_b32 s35, v63, 3 +; SI-NEXT: v_readlane_b32 s31, v63, 1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v2 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s12, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v25 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v60 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v2 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v52 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 12, v0 +; 
SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v33 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s22, 8 +; SI-NEXT: s_lshl_b32 s6, s14, 24 +; SI-NEXT: v_add_i32_e32 v5, vcc, 16, v0 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v24 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v2 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s18, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v55 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v53 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v27 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: 
buffer_load_dword v5, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s28, 8 +; SI-NEXT: s_lshl_b32 s6, s20, 24 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v2 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v16 -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v56 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s24, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; 
SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v47 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v9 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v45 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v38 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s44, 8 +; SI-NEXT: s_lshl_b32 s6, s26, 24 +; SI-NEXT: v_add_i32_e32 v5, vcc, 32, v0 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v20 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s40, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v58 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v13 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v32 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 
4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s58, 8 +; SI-NEXT: s_lshl_b32 s6, s42, 24 +; SI-NEXT: v_add_i32_e32 v5, vcc, 40, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v2 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s46, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v41 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v12 +; SI-NEXT: v_or_b32_e32 v1, v1, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v49 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s72, 8 +; SI-NEXT: s_lshl_b32 s6, s56, 24 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, v13, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte 
Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_or_b32_e32 v3, v3, v5 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v35 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s60, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v4 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v36 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s78, 8 +; SI-NEXT: s_lshl_b32 s6, s62, 24 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v2 -; SI-NEXT: v_or_b32_e32 v11, v13, v11 -; SI-NEXT: v_or_b32_e32 v8, v8, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v8, v11, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, 
s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v61 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 -; SI-NEXT: v_or_b32_e32 v8, v8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, v11, v10 -; SI-NEXT: v_or_b32_e32 v8, v8, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v8, v10, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s74, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v30 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v2 -; SI-NEXT: v_or_b32_e32 v8, v8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v61 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v10 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 
v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s92, 8 +; SI-NEXT: s_lshl_b32 s6, s76, 24 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, v11, v10 -; SI-NEXT: v_or_b32_e32 v8, v8, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v8, v10, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v8, v10, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s88, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; 
SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v27 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v2 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v60 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v23 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s34, 8 +; SI-NEXT: s_lshl_b32 s6, s90, 24 +; SI-NEXT: v_readlane_b32 s34, v63, 2 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v8, v10, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v40 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded 
Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s94, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v24 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v2 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v25 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v18 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_lshl_b32 s5, s38, 8 +; SI-NEXT: s_lshl_b32 s6, s30, 24 +; SI-NEXT: v_readlane_b32 s38, v63, 6 +; SI-NEXT: v_readlane_b32 s30, v63, 0 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword 
v6, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s36, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v21 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v22 +; SI-NEXT: s_lshl_b32 s5, s52, 8 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s6, s48, 24 +; SI-NEXT: v_readlane_b32 s52, v63, 12 +; SI-NEXT: v_readlane_b32 s48, v63, 8 +; SI-NEXT: v_readlane_b32 s36, v63, 4 ; SI-NEXT: 
s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v2 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: 
buffer_load_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s50, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v18 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v17 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s68, 8 +; SI-NEXT: s_lshl_b32 s6, s54, 24 +; SI-NEXT: v_readlane_b32 s68, v63, 20 +; SI-NEXT: v_readlane_b32 s54, v63, 14 +; SI-NEXT: v_readlane_b32 s50, v63, 10 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v2 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v1, v3, 
s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s64, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v23 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v2 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v28 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v30 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 
+; SI-NEXT: s_lshl_b32 s5, s82, 8 +; SI-NEXT: s_lshl_b32 s6, s66, 24 +; SI-NEXT: v_readlane_b32 s82, v63, 26 +; SI-NEXT: v_readlane_b32 s66, v63, 18 +; SI-NEXT: v_readlane_b32 s64, v63, 16 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v41 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s70, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_and_b32_e32 v2, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v57 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v57 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s96, 8 +; SI-NEXT: s_lshl_b32 s6, s80, 24 +; SI-NEXT: v_readlane_b32 s96, v63, 32 +; SI-NEXT: v_readlane_b32 s80, v63, 24 +; SI-NEXT: v_readlane_b32 s70, v63, 22 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: s_and_b32 s5, s84, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v1, s5, v1 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 
4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v26 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v46 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s5, s86, 24 +; SI-NEXT: v_readlane_b32 s86, v63, 30 +; SI-NEXT: v_readlane_b32 s84, v63, 28 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v43 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v29 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v20 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v35 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, s4, v1 +; SI-NEXT: s_and_b32 s4, s98, 0xff +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v1, s4, v1 +; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v38 +; 
SI-NEXT: v_readlane_b32 s98, v63, 34 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 @@ -164016,22 +165748,24 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 
offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; @@ -186200,23 +187934,24 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; SI-LABEL: bitcast_v64f16_to_v128i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], 
s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded 
Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:80 ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4 ; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 @@ -186225,1829 +187960,1684 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i ; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:20 ; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:16 ; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v45, 
off, s[0:3], s32 offset:24 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:36 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:32 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:44 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v63, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:72 +; SI-NEXT: v_writelane_b32 v63, s30, 0 +; SI-NEXT: v_writelane_b32 v63, s31, 1 +; SI-NEXT: v_writelane_b32 v63, s34, 2 +; SI-NEXT: v_writelane_b32 v63, s35, 3 +; SI-NEXT: v_writelane_b32 v63, s36, 4 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_writelane_b32 v63, s37, 5 +; SI-NEXT: v_writelane_b32 v63, s38, 6 +; SI-NEXT: v_writelane_b32 v63, s39, 7 +; SI-NEXT: v_writelane_b32 v63, s48, 8 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v29, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v60, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v11 -; SI-NEXT: v_mov_b32_e32 v59, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v43, v4 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v7 +; SI-NEXT: v_writelane_b32 v63, s49, 9 +; SI-NEXT: v_writelane_b32 v63, s50, 10 
+; SI-NEXT: v_writelane_b32 v63, s51, 11 +; SI-NEXT: v_writelane_b32 v63, s52, 12 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v9 +; SI-NEXT: v_writelane_b32 v63, s53, 13 +; SI-NEXT: v_writelane_b32 v63, s54, 14 +; SI-NEXT: v_writelane_b32 v63, s55, 15 +; SI-NEXT: v_writelane_b32 v63, s64, 16 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v26 +; SI-NEXT: v_writelane_b32 v63, s65, 17 +; SI-NEXT: v_writelane_b32 v63, s66, 18 +; SI-NEXT: v_writelane_b32 v63, s67, 19 +; SI-NEXT: v_writelane_b32 v63, s68, 20 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v25 +; SI-NEXT: v_writelane_b32 v63, s69, 21 +; SI-NEXT: v_writelane_b32 v63, s70, 22 +; SI-NEXT: v_writelane_b32 v63, s71, 23 +; SI-NEXT: v_writelane_b32 v63, s80, 24 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v30 +; SI-NEXT: v_writelane_b32 v63, s81, 25 +; SI-NEXT: v_writelane_b32 v63, s82, 26 +; SI-NEXT: v_writelane_b32 v63, s83, 27 +; SI-NEXT: v_writelane_b32 v63, s84, 28 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_writelane_b32 v63, s85, 29 +; SI-NEXT: v_writelane_b32 v63, s86, 30 +; SI-NEXT: v_writelane_b32 v63, s87, 31 +; SI-NEXT: v_mov_b32_e32 v46, v29 +; SI-NEXT: v_writelane_b32 v63, s96, 32 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v33, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v61, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v43, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v8 ; SI-NEXT: 
v_cvt_f16_f32_e32 v32, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v31, v14 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v6, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v15 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v59 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v59, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v56, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v62, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v16, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v14, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v15, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s29 -; SI-NEXT: v_cvt_f16_f32_e32 v23, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v36 +; SI-NEXT: buffer_store_dword v3, off, 
s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v39 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v56 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v60 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v50 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_cvt_f16_f32_e32 v48, v52 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(13) expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v11, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v51 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v52 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_cvt_f16_f32_e32 v51, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v53 ; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f16_f32_e32 v53, v40 -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v49, v54 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_cvt_f16_f32_e32 v40, v44 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v11, s17 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v52, v47 -; SI-NEXT: v_cvt_f16_f32_e32 v44, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v17, s21 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v28, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v37 -; SI-NEXT: 
v_cvt_f16_f32_e32 v37, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v45 -; SI-NEXT: v_cvt_f16_f32_e32 v45, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v46, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v55 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_cvt_f16_f32_e32 v53, v40 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_cvt_f16_f32_e32 v55, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v18, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v17, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v37, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v48, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v22, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v30, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v39, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v35, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v20, s28 +; SI-NEXT: v_writelane_b32 v63, s97, 33 +; SI-NEXT: v_writelane_b32 v63, s98, 34 +; SI-NEXT: v_writelane_b32 v63, s99, 35 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v21, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, 
off, s[0:3], s32 offset:616 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:200 ; 4-byte Folded 
Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; SI-NEXT: 
s_cbranch_scc0 .LBB95_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v13, v13, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v44 -; SI-NEXT: v_or_b32_e32 v55, v12, v11 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v17 -; SI-NEXT: v_or_b32_e32 v57, v16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v20 -; SI-NEXT: v_or_b32_e32 v17, v14, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v22 -; SI-NEXT: v_or_b32_e32 v21, v21, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v15 -; SI-NEXT: v_or_b32_e32 v16, v19, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v24 -; SI-NEXT: v_or_b32_e32 v19, v23, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v29 -; SI-NEXT: v_or_b32_e32 v47, v60, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v43 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_or_b32_e32 v43, v42, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v33 -; SI-NEXT: v_or_b32_e32 v14, v63, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v61 -; SI-NEXT: v_or_b32_e32 v58, v58, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v32 -; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: v_readfirstlane_b32 s4, v19 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: s_or_b32 s44, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v12 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v17 +; SI-NEXT: s_or_b32 s45, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[44:45], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 4 +; SI-NEXT: v_writelane_b32 v62, s5, 5 +; SI-NEXT: s_lshr_b64 s[4:5], s[44:45], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 2 +; SI-NEXT: v_writelane_b32 v62, s5, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[44:45], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 0 +; SI-NEXT: v_writelane_b32 v62, s5, 1 +; SI-NEXT: v_readfirstlane_b32 s4, v38 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v37 +; SI-NEXT: s_or_b32 s42, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v48 
+; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v22 +; SI-NEXT: s_or_b32 s43, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[42:43], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 10 +; SI-NEXT: v_writelane_b32 v62, s5, 11 +; SI-NEXT: s_lshr_b64 s[4:5], s[42:43], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 8 +; SI-NEXT: v_writelane_b32 v62, s5, 9 +; SI-NEXT: s_lshr_b64 s[4:5], s[42:43], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 6 +; SI-NEXT: v_writelane_b32 v62, s5, 7 +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s4, v15 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_or_b32 s40, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v30 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v39 +; SI-NEXT: s_or_b32 s41, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 16 +; SI-NEXT: v_writelane_b32 v62, s5, 17 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 14 +; SI-NEXT: v_writelane_b32 v62, s5, 15 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 12 +; SI-NEXT: v_writelane_b32 v62, s5, 13 +; SI-NEXT: v_readfirstlane_b32 s4, v35 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v20 +; SI-NEXT: s_or_b32 s28, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v47 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_mov_b32_e32 v22, v2 +; SI-NEXT: v_mov_b32_e32 v39, v5 +; SI-NEXT: v_mov_b32_e32 v60, v16 +; SI-NEXT: v_readfirstlane_b32 s46, v55 +; SI-NEXT: v_mov_b32_e32 v17, v43 +; SI-NEXT: v_mov_b32_e32 v40, v34 +; SI-NEXT: v_mov_b32_e32 v41, v21 +; SI-NEXT: v_mov_b32_e32 v51, v42 +; SI-NEXT: s_lshr_b32 s71, s45, 8 +; SI-NEXT: s_lshr_b32 s70, s43, 8 +; SI-NEXT: s_lshr_b32 s69, s41, 8 +; SI-NEXT: v_bfe_u32 v38, v47, 8, 8 +; SI-NEXT: v_bfe_u32 v37, v33, 8, 8 +; SI-NEXT: v_bfe_u32 v35, v32, 8, 8 +; SI-NEXT: v_bfe_u32 v20, v10, 8, 8 +; SI-NEXT: 
v_bfe_u32 v19, v9, 8, 8 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v4 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: s_or_b32 s29, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 22 +; SI-NEXT: v_writelane_b32 v62, s5, 23 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 20 +; SI-NEXT: v_writelane_b32 v62, s5, 21 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 18 +; SI-NEXT: v_writelane_b32 v62, s5, 19 +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: v_mov_b32_e32 v2, v1 +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_or_b32 s26, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v33 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v43 +; SI-NEXT: s_or_b32 s27, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 28 +; SI-NEXT: v_writelane_b32 v62, s5, 29 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 26 +; SI-NEXT: v_writelane_b32 v62, s5, 27 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 24 +; SI-NEXT: v_writelane_b32 v62, s5, 25 +; SI-NEXT: v_readfirstlane_b32 s4, v5 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshr_b32 s68, s29, 8 +; SI-NEXT: s_lshr_b32 s66, s27, 8 +; SI-NEXT: v_bfe_u32 v43, v31, 8, 8 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: s_or_b32 s24, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v32 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_bfe_u32 v15, v5, 8, 8 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: 
v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_or_b32 s25, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 34 +; SI-NEXT: v_writelane_b32 v62, s5, 35 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 32 +; SI-NEXT: v_writelane_b32 v62, s5, 33 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 30 +; SI-NEXT: v_writelane_b32 v62, s5, 31 +; SI-NEXT: v_readfirstlane_b32 s4, v7 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v29 +; SI-NEXT: s_or_b32 s22, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v31 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v6 +; SI-NEXT: s_or_b32 s23, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 40 +; SI-NEXT: v_writelane_b32 v62, s5, 41 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: v_writelane_b32 v62, s4, 38 +; SI-NEXT: v_writelane_b32 v62, s5, 39 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 8 +; SI-NEXT: v_writelane_b32 v62, s4, 36 +; SI-NEXT: v_writelane_b32 v62, s5, 37 +; SI-NEXT: v_readfirstlane_b32 s4, v58 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v13 +; SI-NEXT: s_or_b32 s20, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v10 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v11 +; SI-NEXT: s_or_b32 s21, s5, s4 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 24 +; SI-NEXT: v_writelane_b32 v62, s4, 44 +; SI-NEXT: v_writelane_b32 v62, s5, 45 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: v_mov_b32_e32 v58, v11 +; SI-NEXT: v_writelane_b32 v62, s4, 42 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_writelane_b32 v62, s5, 43 +; SI-NEXT: v_readfirstlane_b32 s5, v16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v7, v29 +; SI-NEXT: v_mov_b32_e32 v29, v6 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], 
s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s4, v34 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_or_b32 s18, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v9 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v61 +; SI-NEXT: s_or_b32 s19, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v21 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v24 +; SI-NEXT: s_or_b32 s16, s5, s4 +; SI-NEXT: v_mov_b32_e32 v1, v53 +; SI-NEXT: v_mov_b32_e32 v34, v61 +; SI-NEXT: v_mov_b32_e32 v21, v24 +; SI-NEXT: s_lshr_b32 s64, s25, 8 +; SI-NEXT: s_lshr_b32 s54, s23, 8 +; SI-NEXT: s_lshr_b32 s52, s21, 8 +; SI-NEXT: s_lshr_b32 s50, s19, 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[96:97], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[98:99], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 8 +; SI-NEXT: v_mov_b32_e32 v13, v12 +; SI-NEXT: v_bfe_u32 v24, v12, 8, 8 +; SI-NEXT: v_mov_b32_e32 v12, v48 +; SI-NEXT: v_bfe_u32 v48, v48, 8, 8 +; SI-NEXT: v_bfe_u32 v61, v59, 8, 8 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_readfirstlane_b32 s4, v11 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_bfe_u32 v18, v11, 8, 8 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_readfirstlane_b32 s5, v16 +; SI-NEXT: s_or_b32 s17, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v44 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v42 +; SI-NEXT: s_or_b32 s14, s5, s4 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v60, v12, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v3 -; SI-NEXT: v_or_b32_e32 v22, v2, v11 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v52 -; SI-NEXT: v_or_b32_e32 v12, v46, v12 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v24, v2, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 -; SI-NEXT: v_or_b32_e32 v4, v4, v11 -; SI-NEXT: 
v_lshlrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_or_b32_e32 v34, v34, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v6 -; SI-NEXT: v_or_b32_e32 v3, v59, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_or_b32_e32 v59, v56, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v18 -; SI-NEXT: v_or_b32_e32 v6, v62, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v7 -; SI-NEXT: v_or_b32_e32 v62, v25, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v26 -; SI-NEXT: v_or_b32_e32 v2, v27, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v8 -; SI-NEXT: v_or_b32_e32 v25, v28, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v30 -; SI-NEXT: v_or_b32_e32 v1, v36, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v5 -; SI-NEXT: v_or_b32_e32 v23, v35, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v38 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v18, v39, v11 -; SI-NEXT: v_mov_b32_e32 v36, v2 -; SI-NEXT: v_mov_b32_e32 v35, v1 -; SI-NEXT: v_alignbit_b32 v1, v55, v13, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v55, v13, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v55, v13, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v17, v57, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v17, v57, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v17, v57, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v16, v21, 24 -; SI-NEXT: buffer_store_dword v1, 
off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v16, v21, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v16, v21, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v47, v19, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v47, v19, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v47, v19, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v14, v43, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v14, v43, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v14, v43, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v60, v58, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v60, v58, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v60, v58, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v24, v22, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v24, v22, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v24, v22, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v34, v4, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v34, v4, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v34, v4, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v59, v3, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v59, v3, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v59, v3, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v38 -; SI-NEXT: v_or_b32_e32 v61, v50, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v49 -; SI-NEXT: v_or_b32_e32 v2, v48, v11 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v49, v6 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v62, v49, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v62, v49, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v62, v49, 8 -; SI-NEXT: buffer_store_dword 
v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v25, v36, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v25, v36, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v25, v36, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v23, v35, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v23, v35, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v23, v35, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v61, v18, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v61, v18, 16 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v48 -; SI-NEXT: v_or_b32_e32 v56, v54, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v51 -; SI-NEXT: v_or_b32_e32 v6, v53, v11 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v1, v61, v18, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v56, v2, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 
offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v1, v56, v2, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v55 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v17 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v47 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v14 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v60 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v34 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v59 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v62 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v25 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 
4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v23 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v37 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v61 -; SI-NEXT: v_or_b32_e32 v54, v40, v11 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v56 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v54 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v12 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v44, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v20, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v15, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v29, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v33, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v32, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v31, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_bfe_u32 v1, v10, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v9, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v8, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v5, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v38, 8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v48, 8, 8 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v41 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v37, 8, 8 -; SI-NEXT: v_or_b32_e32 v11, v45, v11 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v1, v52, 8, 8 -; SI-NEXT: v_alignbit_b32 v28, v56, v2, 24 -; SI-NEXT: v_alignbit_b32 v2, v54, v6, 24 -; SI-NEXT: v_alignbit_b32 v39, v54, v6, 16 -; SI-NEXT: v_alignbit_b32 v30, v54, v6, 8 -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 24 -; SI-NEXT: v_alignbit_b32 v42, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v26, v12, v11, 8 -; SI-NEXT: v_mov_b32_e32 v20, v29 -; SI-NEXT: v_mov_b32_e32 v15, v33 -; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_readfirstlane_b32 s4, v6 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v46 +; SI-NEXT: s_or_b32 s15, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v8 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v23 +; SI-NEXT: s_or_b32 s12, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v5 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v3 
+; SI-NEXT: s_or_b32 s13, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v27 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v28 +; SI-NEXT: s_or_b32 s10, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v4 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v45 +; SI-NEXT: s_or_b32 s11, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v36 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v57 +; SI-NEXT: s_or_b32 s8, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v59 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v56 +; SI-NEXT: s_or_b32 s9, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v26 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v25 +; SI-NEXT: s_or_b32 s6, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v52 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v50 +; SI-NEXT: s_or_b32 s7, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v49 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v54 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v53 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_or_b32 s5, s46, s5 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 24 +; SI-NEXT: v_writelane_b32 v62, vcc_lo, 48 +; SI-NEXT: v_writelane_b32 v62, vcc_hi, 49 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 8 +; SI-NEXT: v_writelane_b32 v62, vcc_lo, 46 +; SI-NEXT: s_lshr_b32 s48, s17, 8 +; SI-NEXT: s_lshr_b32 s67, s15, 8 +; SI-NEXT: s_lshr_b32 s65, s13, 8 +; SI-NEXT: s_lshr_b32 s55, s11, 8 +; SI-NEXT: s_lshr_b32 s53, s9, 8 +; SI-NEXT: s_lshr_b32 s51, s7, 8 +; SI-NEXT: s_lshr_b32 s49, s5, 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[12:13], 16 +; SI-NEXT: 
s_lshr_b64 s[90:91], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[82:83], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[84:85], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[86:87], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[80:81], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v62, vcc_hi, 47 +; SI-NEXT: s_mov_b64 vcc, 0 +; SI-NEXT: v_mov_b32_e32 v57, v30 +; SI-NEXT: v_bfe_u32 v50, v30, 8, 8 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_mov_b32_e32 v46, v33 +; SI-NEXT: v_mov_b32_e32 v30, v32 ; SI-NEXT: v_mov_b32_e32 v32, v31 ; SI-NEXT: v_mov_b32_e32 v31, v10 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v9, v7 -; SI-NEXT: v_bfe_u32 v29, v7, 8, 8 -; SI-NEXT: v_mov_b32_e32 v7, v8 -; SI-NEXT: v_mov_b32_e32 v8, v5 -; SI-NEXT: v_mov_b32_e32 v44, v37 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v54, v9 +; SI-NEXT: v_bfe_u32 v42, v6, 8, 8 +; SI-NEXT: v_bfe_u32 v45, v4, 8, 8 +; SI-NEXT: v_bfe_u32 v47, v52, 8, 8 +; SI-NEXT: v_bfe_u32 v33, v1, 8, 8 +; SI-NEXT: v_mov_b32_e32 v3, v14 +; SI-NEXT: v_mov_b32_e32 v25, v59 +; SI-NEXT: v_mov_b32_e32 v1, v52 +; SI-NEXT: v_mov_b32_e32 v44, v11 ; SI-NEXT: s_branch .LBB95_3 ; SI-NEXT: .LBB95_2: -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; 
kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: 
; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; 
SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: v_mov_b32_e32 v20, v29 -; SI-NEXT: v_mov_b32_e32 v15, v33 -; SI-NEXT: v_mov_b32_e32 v33, v32 +; SI-NEXT: v_mov_b32_e32 v60, v16 +; SI-NEXT: s_waitcnt expcnt(4) +; SI-NEXT: v_mov_b32_e32 v7, v29 +; SI-NEXT: v_mov_b32_e32 v29, v6 +; SI-NEXT: v_mov_b32_e32 v39, v5 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: v_mov_b32_e32 v51, v42 +; SI-NEXT: v_writelane_b32 v62, s4, 0 +; SI-NEXT: v_writelane_b32 v62, s5, 1 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v41, v21 +; SI-NEXT: v_writelane_b32 v62, s4, 2 +; SI-NEXT: v_writelane_b32 v62, s5, 3 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v21, v24 +; SI-NEXT: v_writelane_b32 v62, s4, 4 +; SI-NEXT: v_writelane_b32 v62, s5, 5 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v40, v34 +; SI-NEXT: v_writelane_b32 v62, s4, 6 +; SI-NEXT: v_writelane_b32 v62, s5, 7 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v34, v61 +; SI-NEXT: v_writelane_b32 v62, s4, 8 +; SI-NEXT: 
v_writelane_b32 v62, s5, 9 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v13, v12 +; SI-NEXT: v_writelane_b32 v62, s4, 10 +; SI-NEXT: v_writelane_b32 v62, s5, 11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v12, v48 +; SI-NEXT: v_writelane_b32 v62, s4, 12 +; SI-NEXT: v_writelane_b32 v62, s5, 13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v57, v30 +; SI-NEXT: v_writelane_b32 v62, s4, 14 +; SI-NEXT: v_writelane_b32 v62, s5, 15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v58, v11 +; SI-NEXT: v_writelane_b32 v62, s4, 16 +; SI-NEXT: v_writelane_b32 v62, s5, 17 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_writelane_b32 v62, s4, 18 +; SI-NEXT: v_writelane_b32 v62, s5, 19 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v46, v33 +; SI-NEXT: v_writelane_b32 v62, s4, 20 +; SI-NEXT: v_writelane_b32 v62, s5, 21 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v30, v32 +; SI-NEXT: v_writelane_b32 v62, s4, 22 +; SI-NEXT: v_writelane_b32 v62, s5, 23 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: v_mov_b32_e32 v32, v31 +; SI-NEXT: v_writelane_b32 v62, s4, 24 +; SI-NEXT: v_writelane_b32 v62, s5, 25 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: v_mov_b32_e32 v31, v10 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v9, v7 -; SI-NEXT: v_mov_b32_e32 v7, v8 -; SI-NEXT: v_mov_b32_e32 v8, v5 -; SI-NEXT: v_mov_b32_e32 v44, v37 -; SI-NEXT: s_mov_b64 s[4:5], -1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr60 -; 
SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: v_writelane_b32 v62, s4, 26 +; SI-NEXT: v_writelane_b32 v62, s5, 27 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v54, v9 +; SI-NEXT: v_writelane_b32 v62, s4, 28 +; SI-NEXT: v_writelane_b32 v62, s5, 29 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v55, v4 +; SI-NEXT: v_writelane_b32 v62, s4, 30 +; SI-NEXT: v_writelane_b32 v62, s5, 31 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v22, v2 +; SI-NEXT: v_writelane_b32 v62, s4, 32 +; SI-NEXT: v_writelane_b32 v62, s5, 33 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v2, v1 +; SI-NEXT: v_writelane_b32 v62, s4, 34 +; SI-NEXT: v_writelane_b32 v62, s5, 35 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v17, v43 +; SI-NEXT: v_writelane_b32 v62, s4, 36 +; SI-NEXT: v_writelane_b32 v62, s5, 37 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_mov_b64 vcc, -1 +; SI-NEXT: v_writelane_b32 v62, s4, 38 +; SI-NEXT: v_writelane_b32 v62, s5, 39 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v25, v59 +; SI-NEXT: v_writelane_b32 v62, s4, 40 +; SI-NEXT: v_writelane_b32 v62, s5, 41 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_mov_b32_e32 v1, v52 +; SI-NEXT: v_writelane_b32 v62, s4, 42 +; SI-NEXT: v_writelane_b32 v62, s5, 43 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr71 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; 
implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr64 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $sgpr18 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr96 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $sgpr67 ; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; 
SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: v_writelane_b32 v62, s4, 44 +; SI-NEXT: v_writelane_b32 v62, s5, 45 +; SI-NEXT: v_writelane_b32 v62, s80, 46 +; SI-NEXT: v_writelane_b32 v62, s81, 47 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v62, s80, 48 +; SI-NEXT: v_writelane_b32 v62, s81, 49 +; SI-NEXT: ; implicit-def: $sgpr80 ; SI-NEXT: .LBB95_3: ; %Flow -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v1, v44 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v5, v8 -; SI-NEXT: v_mov_b32_e32 v6, v7 -; SI-NEXT: v_mov_b32_e32 v7, v9 -; SI-NEXT: v_mov_b32_e32 v8, v10 -; SI-NEXT: v_mov_b32_e32 v9, v31 -; SI-NEXT: v_mov_b32_e32 v31, v33 -; SI-NEXT: v_mov_b32_e32 v44, v15 -; SI-NEXT: v_mov_b32_e32 v33, v20 -; SI-NEXT: buffer_load_dword v15, 
off, s[0:3], s32 offset:452 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v38, v2 -; SI-NEXT: v_mov_b32_e32 v53, v30 -; SI-NEXT: v_mov_b32_e32 v40, v28 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_mov_b32_e32 v2, v48 -; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v30, v29 -; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload -; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; SI-NEXT: v_mov_b32_e32 v50, v42 -; SI-NEXT: v_mov_b32_e32 v42, v26 -; SI-NEXT: v_mov_b32_e32 v26, v52 +; SI-NEXT: v_mov_b32_e32 v14, v17 +; SI-NEXT: v_mov_b32_e32 v17, v39 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: s_andn2_b64 vcc, exec, vcc +; SI-NEXT: v_mov_b32_e32 v23, v2 +; SI-NEXT: v_mov_b32_e32 v59, v34 +; SI-NEXT: v_mov_b32_e32 v2, v25 ; SI-NEXT: s_cbranch_vccnz .LBB95_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v52, v29 +; SI-NEXT: v_mov_b32_e32 v29, v7 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v18, v14 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_mov_b32_e32 v15, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v53 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: 
v_cvt_f32_f16_e32 v4, v55 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v4 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v31 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_cvt_f32_f16_e32 v35, v37 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v8 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f32_f16_e32 v11, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v31, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v10 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v12, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v26 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v34 -; SI-NEXT: v_or_b32_e32 v11, 
v12, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v11 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v13, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_bfe_u32 v50, v57, 8, 8 +; SI-NEXT: v_bfe_u32 v48, v12, 8, 8 +; SI-NEXT: v_bfe_u32 v43, v32, 8, 8 +; SI-NEXT: v_bfe_u32 v24, v13, 8, 8 +; SI-NEXT: v_bfe_u32 v20, v31, 8, 8 +; SI-NEXT: v_bfe_u32 v19, v54, 8, 8 +; SI-NEXT: v_bfe_u32 v42, v6, 8, 8 +; SI-NEXT: v_bfe_u32 v45, v55, 8, 8 +; SI-NEXT: v_bfe_u32 v61, v2, 8, 8 +; SI-NEXT: v_bfe_u32 v33, v53, 8, 8 +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v14, v7 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s4, v14 +; SI-NEXT: s_lshl_b32 s4, s4, 16 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v14, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v44 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v7 +; SI-NEXT: v_readfirstlane_b32 s5, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v53 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_or_b32_e32 v36, v14, v13 +; SI-NEXT: s_lshl_b32 
s5, s5, 16 +; SI-NEXT: v_readfirstlane_b32 s6, v14 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[4:5], 16 +; SI-NEXT: s_lshr_b32 s49, s5, 8 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_cvt_f32_f16_e32 v14, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_or_b32_e32 v54, v14, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v4, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v13 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v17, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_or_b32_e32 v52, v17, v16 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v27, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v17, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 -; SI-NEXT: 
v_or_b32_e32 v56, v17, v19 -; SI-NEXT: v_alignbit_b32 v40, v56, v52, 24 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v19, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; SI-NEXT: v_readfirstlane_b32 s6, v14 +; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_or_b32_e32 v42, v21, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s7, v14 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_readfirstlane_b32 s7, v3 +; SI-NEXT: s_lshl_b32 s7, s7, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v1 -; SI-NEXT: v_mov_b32_e32 v1, v34 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v8 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_or_b32_e32 v61, v21, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s8, v14 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_lshr_b64 s[82:83], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[84:85], s[6:7], 16 +; SI-NEXT: 
s_lshr_b64 s[86:87], s[6:7], 8 +; SI-NEXT: s_lshr_b32 s51, s7, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v23, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_or_b32_e32 v16, v23, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s8, v14 +; SI-NEXT: s_lshl_b32 s8, s8, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v23, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_or_b32_e32 v23, v23, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s9, v14 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_readfirstlane_b32 s9, v2 +; SI-NEXT: s_lshl_b32 s9, s9, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v24, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s10, v14 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[8:9], 8 +; SI-NEXT: s_lshr_b32 s53, s9, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: 
v_cvt_f32_f16_e32 v26, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_or_b32_e32 v48, v25, v24 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s10, v14 +; SI-NEXT: s_lshl_b32 s10, s10, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v26, v3 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_or_b32_e32 v25, v26, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_or_b32_e32 v53, v26, v27 -; SI-NEXT: v_mov_b32_e32 v26, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v28, v4 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_or_b32_e32 v62, v28, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s11, v14 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_readfirstlane_b32 s11, v55 +; SI-NEXT: s_lshl_b32 s11, s11, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v29, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 24 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: 
v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v63 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_or_b32_e32 v59, v29, v34 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v35 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v29, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v51 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v30, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_or_b32_e32 v3, v30, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v44 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_or_b32_e32 v4, v34, v30 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v32 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 -; SI-NEXT: v_mov_b32_e32 v30, v10 -; SI-NEXT: v_mov_b32_e32 v32, v30 -; SI-NEXT: v_or_b32_e32 v34, v35, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v46 -; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v51 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: 
v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_or_b32_e32 v22, v35, v36 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v10 -; SI-NEXT: v_mov_b32_e32 v35, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s12, v14 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[10:11], 8 +; SI-NEXT: s_lshr_b32 s55, s11, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_or_b32_e32 v24, v37, v36 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v38 -; SI-NEXT: v_or_b32_e32 v58, v39, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v45 -; SI-NEXT: v_mov_b32_e32 v36, v48 -; SI-NEXT: v_alignbit_b32 v38, v54, v29, 24 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s12, v14 +; SI-NEXT: s_lshl_b32 s12, s12, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v13 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_or_b32_e32 v60, v37, v39 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v49 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_alignbit_b32 v39, v54, v29, 16 -; SI-NEXT: v_or_b32_e32 v43, v48, v37 -; 
SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v44 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s13, v14 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_readfirstlane_b32 s13, v5 +; SI-NEXT: s_lshl_b32 s13, s13, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_or_b32_e32 v14, v49, v48 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v28, v14, v43, 8 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s14, v14 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: s_lshr_b64 s[78:79], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[12:13], 8 +; SI-NEXT: s_lshr_b32 s65, s13, 8 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v51 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: 
v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s15, v14 +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_readfirstlane_b32 s15, v6 +; SI-NEXT: s_lshl_b32 s15, s15, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; SI-NEXT: v_or_b32_e32 v19, v48, v37 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v33 -; SI-NEXT: v_or_b32_e32 v47, v49, v37 -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s16, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v41 +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: s_lshr_b64 s[92:93], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[14:15], 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshr_b64 s[30:31], s[14:15], 8 +; SI-NEXT: s_lshr_b32 s67, s15, 8 +; SI-NEXT: v_readfirstlane_b32 s16, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v21 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s17, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readfirstlane_b32 s17, v44 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s18, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v40 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 16 +; SI-NEXT: 
v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshr_b64 s[38:39], s[16:17], 8 +; SI-NEXT: s_lshr_b32 s48, s17, 8 +; SI-NEXT: v_readfirstlane_b32 s18, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v60 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s19, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v59 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: v_readfirstlane_b32 s19, v54 +; SI-NEXT: s_lshl_b32 s19, s19, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s20, v14 +; SI-NEXT: s_or_b32 s19, s20, s19 +; SI-NEXT: s_lshr_b64 s[96:97], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[98:99], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 8 +; SI-NEXT: s_lshr_b32 s50, s19, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v48 -; SI-NEXT: v_or_b32_e32 v21, v50, v37 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_or_b32_e32 v16, v37, v49 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v50 -; SI-NEXT: 
v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_alignbit_b32 v50, v12, v11, 16 -; SI-NEXT: v_or_b32_e32 v57, v48, v37 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v18 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; SI-NEXT: v_or_b32_e32 v17, v49, v48 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_mov_b32_e32 v49, v53 -; SI-NEXT: v_alignbit_b32 v53, v54, v29, 8 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s20, v14 +; SI-NEXT: s_lshl_b32 s20, s20, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_or_b32_e32 v13, v48, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v20 -; SI-NEXT: v_or_b32_e32 v55, v51, v37 -; SI-NEXT: v_alignbit_b32 v10, v55, v13, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v55, v13, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v17, v57, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v17, v57, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v17, v57, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v16, v21, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v16, v21, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v16, v21, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v47, v19, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v47, v19, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v47, v19, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v14, v43, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v14, v43, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v60, v58, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v60, v58, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v60, v58, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 
v10, v24, v22, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v24, v22, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v24, v22, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v34, v4, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v34, v4, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v34, v4, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v59, v3, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v59, v3, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v59, v3, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v62, v49, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v62, v49, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v62, v49, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v25, v36, 24 -; SI-NEXT: buffer_store_dword v10, 
off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v25, v36, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v25, v36, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v23, v35, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v23, v35, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v23, v35, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v61, v42, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v61, v42, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v10, v61, v42, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v10, v56, v52, 16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v10, v56, v52, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v55 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:428 ; 4-byte 
Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v17 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v16 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v47 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v14 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v60 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v59 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v62 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v25 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v23 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v61 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v56 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 
8, v54 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v12 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v20, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v18, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v15, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v33, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v44, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v31, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v30, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v9, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v8, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v6, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v5, 8, 8 -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, 
s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_bfe_u32 v10, v26, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v2, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v48, v55, v13, 24 -; SI-NEXT: v_alignbit_b32 v42, v12, v11, 8 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v34 -; SI-NEXT: v_bfe_u32 v30, v7, 8, 8 -; SI-NEXT: v_mov_b32_e32 v26, v1 -; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v37, 8, 8 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v10, v1, 8, 8 -; SI-NEXT: v_mov_b32_e32 v1, v37 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: .LBB95_5: ; %end -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_and_b32_e32 v37, 0xff, v13 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v52, 24, v48 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: 
v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v37, v37, v51 -; SI-NEXT: v_and_b32_e32 v37, 0xffff, v37 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v51, 0xff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v51 -; SI-NEXT: v_or_b32_e32 v51, v52, v51 -; SI-NEXT: v_or_b32_e32 v37, v37, v51 -; SI-NEXT: buffer_store_dword v37, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v37, 0xff, v55 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v37, v37, v51 -; SI-NEXT: v_and_b32_e32 v37, 0xffff, v37 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v48, 24, v10 -; SI-NEXT: v_or_b32_e32 v20, v48, v20 -; SI-NEXT: v_or_b32_e32 v20, v37, v20 -; SI-NEXT: v_add_i32_e32 v37, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v20, v37, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v57 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v37, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v20, v20, v37 -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v37, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v48, 
24, v10 -; SI-NEXT: v_or_b32_e32 v37, v48, v37 -; SI-NEXT: v_or_b32_e32 v20, v20, v37 -; SI-NEXT: v_add_i32_e32 v37, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v20, v37, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v17 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v37, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v20, v20, v37 -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v37, v18 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v18, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v21 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v18, v18, v20 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s21, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v58 +; SI-NEXT: s_or_b32 s20, s21, s20 +; SI-NEXT: v_readfirstlane_b32 s21, v31 +; SI-NEXT: s_lshl_b32 s21, s21, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s22, v14 +; SI-NEXT: v_cvt_f32_f16_e32 
v14, v39 +; SI-NEXT: s_or_b32 s21, s22, s21 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 8 +; SI-NEXT: s_lshr_b32 s52, s21, 8 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s22, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v29 +; SI-NEXT: s_lshl_b32 s22, s22, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s23, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v52 +; SI-NEXT: s_or_b32 s22, s23, s22 +; SI-NEXT: v_readfirstlane_b32 s23, v32 +; SI-NEXT: s_lshl_b32 s23, s23, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s24, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v17 +; SI-NEXT: s_or_b32 s23, s24, s23 +; SI-NEXT: s_lshr_b32 s54, s23, 8 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s24, v14 +; SI-NEXT: s_lshl_b32 s24, s24, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v10 -; SI-NEXT: v_or_b32_e32 v20, v37, v20 -; SI-NEXT: v_or_b32_e32 v18, v18, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v18, v20, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s25, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v30 +; SI-NEXT: s_or_b32 s24, s25, s24 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v14 +; SI-NEXT: v_readfirstlane_b32 s25, v30 +; SI-NEXT: s_lshl_b32 s25, s25, 16 +; SI-NEXT: v_bfe_u32 v35, v30, 8, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 
v20, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v18, v18, v20 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s26, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v22 +; SI-NEXT: s_or_b32 s25, s26, s25 +; SI-NEXT: s_lshr_b32 s64, s25, 8 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s26, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v23 +; SI-NEXT: s_lshl_b32 s26, s26, 16 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s27, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v46 +; SI-NEXT: s_or_b32 s26, s27, s26 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v18 +; SI-NEXT: v_bfe_u32 v18, v44, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s27, v46 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshl_b32 s27, s27, 16 +; SI-NEXT: v_bfe_u32 v37, v46, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s28, v14 +; SI-NEXT: s_or_b32 s27, s28, s27 +; SI-NEXT: s_lshr_b32 s66, s27, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v15, v20, v15 -; SI-NEXT: v_or_b32_e32 v15, v18, v15 -; SI-NEXT: v_add_i32_e32 v18, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; 
SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s28, v14 +; SI-NEXT: s_lshl_b32 s28, s28, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s29, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v56 +; SI-NEXT: s_or_b32 s28, s29, s28 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v15 +; SI-NEXT: v_bfe_u32 v15, v5, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s29, v56 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshl_b32 s29, s29, 16 +; SI-NEXT: v_bfe_u32 v38, v56, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s40, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_or_b32 s29, s40, s29 +; SI-NEXT: s_lshr_b32 s68, s29, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s40, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: s_lshl_b32 s40, s40, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: 
buffer_load_dword v10, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v47 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s41, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: s_or_b32 s40, s41, s40 +; SI-NEXT: v_readfirstlane_b32 s41, v57 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshl_b32 s41, s41, 16 +; SI-NEXT: v_readfirstlane_b32 s42, v14 +; SI-NEXT: s_or_b32 s41, s42, s41 +; SI-NEXT: s_lshr_b32 s69, s41, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s42, v14 +; SI-NEXT: s_lshl_b32 s42, s42, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v43 -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v28 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte 
Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s43, v14 +; SI-NEXT: s_or_b32 s42, s43, s42 +; SI-NEXT: v_readfirstlane_b32 s43, v12 +; SI-NEXT: s_lshl_b32 s43, s43, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s44, v14 +; SI-NEXT: s_or_b32 s43, s44, s43 +; SI-NEXT: s_lshr_b32 s70, s43, 8 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s44, v14 +; SI-NEXT: s_lshl_b32 s44, s44, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v44 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v1 +; SI-NEXT: v_mov_b32_e32 v1, v3 +; SI-NEXT: v_bfe_u32 v47, v1, 8, 8 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: 
v_readfirstlane_b32 s45, v14 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: s_or_b32 s44, s45, s44 +; SI-NEXT: v_readfirstlane_b32 s45, v13 +; SI-NEXT: s_lshl_b32 s45, s45, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_readfirstlane_b32 s46, v14 +; SI-NEXT: s_or_b32 s45, s46, s45 +; SI-NEXT: s_lshr_b64 s[46:47], s[44:45], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 4 +; SI-NEXT: v_writelane_b32 v62, s47, 5 +; SI-NEXT: s_lshr_b64 s[46:47], s[44:45], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 2 +; SI-NEXT: v_writelane_b32 v62, s47, 3 +; SI-NEXT: s_lshr_b64 s[46:47], s[44:45], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 0 +; SI-NEXT: v_writelane_b32 v62, s47, 1 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 10 +; SI-NEXT: v_writelane_b32 v62, s47, 11 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 8 +; SI-NEXT: v_writelane_b32 v62, s47, 9 +; SI-NEXT: s_lshr_b64 s[46:47], s[42:43], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 6 +; SI-NEXT: v_writelane_b32 v62, s47, 7 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 16 +; SI-NEXT: v_writelane_b32 v62, s47, 17 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 14 +; SI-NEXT: v_writelane_b32 v62, s47, 15 +; SI-NEXT: s_lshr_b64 s[46:47], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 12 +; SI-NEXT: v_writelane_b32 v62, s47, 13 +; SI-NEXT: s_lshr_b64 s[46:47], s[28:29], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 22 +; 
SI-NEXT: v_writelane_b32 v62, s47, 23 +; SI-NEXT: s_lshr_b64 s[46:47], s[28:29], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 20 +; SI-NEXT: v_writelane_b32 v62, s47, 21 +; SI-NEXT: s_lshr_b64 s[46:47], s[28:29], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 18 +; SI-NEXT: v_writelane_b32 v62, s47, 19 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 28 +; SI-NEXT: v_writelane_b32 v62, s47, 29 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 26 +; SI-NEXT: v_writelane_b32 v62, s47, 27 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 24 +; SI-NEXT: v_writelane_b32 v62, s47, 25 +; SI-NEXT: s_lshr_b64 s[46:47], s[24:25], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 34 +; SI-NEXT: v_writelane_b32 v62, s47, 35 +; SI-NEXT: s_lshr_b64 s[46:47], s[24:25], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 32 +; SI-NEXT: v_writelane_b32 v62, s47, 33 +; SI-NEXT: s_lshr_b64 s[46:47], s[24:25], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 30 +; SI-NEXT: v_writelane_b32 v62, s47, 31 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 40 +; SI-NEXT: v_writelane_b32 v62, s47, 41 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 38 +; SI-NEXT: v_writelane_b32 v62, s47, 39 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 8 +; SI-NEXT: v_writelane_b32 v62, s46, 36 +; SI-NEXT: v_writelane_b32 v62, s47, 37 +; SI-NEXT: s_lshr_b64 s[46:47], s[20:21], 24 +; SI-NEXT: v_writelane_b32 v62, s46, 44 +; SI-NEXT: v_writelane_b32 v62, s47, 45 +; SI-NEXT: s_lshr_b64 s[46:47], s[20:21], 16 +; SI-NEXT: v_writelane_b32 v62, s46, 42 +; SI-NEXT: v_writelane_b32 v62, s47, 43 +; SI-NEXT: v_writelane_b32 v62, vcc_lo, 48 +; SI-NEXT: v_writelane_b32 v62, vcc_hi, 49 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 8 +; SI-NEXT: v_writelane_b32 v62, vcc_lo, 46 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v62, vcc_hi, 47 +; SI-NEXT: s_lshr_b32 s71, s45, 8 +; 
SI-NEXT: .LBB95_5: ; %end +; SI-NEXT: v_readlane_b32 vcc_lo, v62, 0 +; SI-NEXT: v_readlane_b32 vcc_hi, v62, 1 +; SI-NEXT: s_lshl_b32 s47, vcc_lo, 8 +; SI-NEXT: v_readlane_b32 vcc_lo, v62, 2 +; SI-NEXT: s_and_b32 s44, s44, 0xff +; SI-NEXT: v_readlane_b32 vcc_hi, v62, 3 +; SI-NEXT: s_or_b32 s44, s44, s47 +; SI-NEXT: s_and_b32 s47, vcc_lo, 0xff +; SI-NEXT: v_readlane_b32 vcc_lo, v62, 4 +; SI-NEXT: s_lshl_b32 s57, vcc_lo, 24 +; SI-NEXT: s_lshl_b32 s47, s47, 16 +; SI-NEXT: s_or_b32 s47, s57, s47 +; SI-NEXT: s_and_b32 s44, s44, 0xffff +; SI-NEXT: s_or_b32 s44, s44, s47 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_mov_b32_e32 v16, s44 +; SI-NEXT: s_and_b32 s44, s45, 0xff +; SI-NEXT: s_lshl_b32 s45, s71, 8 +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: s_or_b32 s44, s44, s45 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v24 +; SI-NEXT: s_and_b32 s44, s44, 0xffff +; SI-NEXT: v_or_b32_e32 v13, v21, v13 +; SI-NEXT: v_or_b32_e32 v13, s44, v13 +; SI-NEXT: v_readlane_b32 s44, v62, 6 +; SI-NEXT: v_readlane_b32 s45, v62, 7 +; SI-NEXT: s_lshl_b32 s44, s44, 8 +; SI-NEXT: s_and_b32 s42, s42, 0xff +; SI-NEXT: v_readlane_b32 vcc_hi, v62, 5 +; SI-NEXT: s_or_b32 s42, s42, s44 +; SI-NEXT: v_readlane_b32 s44, v62, 8 +; SI-NEXT: v_readlane_b32 s45, v62, 9 +; SI-NEXT: s_and_b32 s44, s44, 0xff +; SI-NEXT: v_readlane_b32 vcc_lo, v62, 10 +; SI-NEXT: s_lshl_b32 s45, vcc_lo, 24 +; SI-NEXT: s_lshl_b32 s44, s44, 16 +; SI-NEXT: s_or_b32 s44, s45, s44 +; SI-NEXT: s_and_b32 s42, s42, 0xffff +; SI-NEXT: s_or_b32 s42, s42, s44 +; SI-NEXT: v_mov_b32_e32 v21, s42 +; SI-NEXT: s_and_b32 s42, s43, 0xff +; SI-NEXT: s_lshl_b32 s43, s70, 8 +; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 +; SI-NEXT: s_or_b32 s42, s42, s43 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v48 +; SI-NEXT: s_and_b32 s42, s42, 0xffff +; SI-NEXT: v_or_b32_e32 v12, v23, v12 +; SI-NEXT: v_or_b32_e32 v12, s42, v12 +; SI-NEXT: v_readlane_b32 s42, v62, 12 +; SI-NEXT: 
v_readlane_b32 s43, v62, 13 +; SI-NEXT: s_lshl_b32 s42, s42, 8 +; SI-NEXT: s_and_b32 s40, s40, 0xff +; SI-NEXT: s_or_b32 s40, s40, s42 +; SI-NEXT: v_readlane_b32 s42, v62, 14 +; SI-NEXT: v_readlane_b32 s43, v62, 15 +; SI-NEXT: s_and_b32 s42, s42, 0xff +; SI-NEXT: v_readlane_b32 s44, v62, 16 +; SI-NEXT: s_lshl_b32 s43, s44, 24 +; SI-NEXT: s_lshl_b32 s42, s42, 16 +; SI-NEXT: s_or_b32 s42, s43, s42 +; SI-NEXT: s_and_b32 s40, s40, 0xffff +; SI-NEXT: s_or_b32 s40, s40, s42 +; SI-NEXT: v_mov_b32_e32 v23, s40 +; SI-NEXT: s_and_b32 s40, s41, 0xff +; SI-NEXT: s_lshl_b32 s41, s69, 8 +; SI-NEXT: v_and_b32_e32 v11, 0xff, v57 +; SI-NEXT: s_or_b32 s40, s40, s41 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v50 +; SI-NEXT: s_and_b32 s40, s40, 0xffff +; SI-NEXT: v_or_b32_e32 v11, v24, v11 +; SI-NEXT: v_or_b32_e32 v11, s40, v11 +; SI-NEXT: v_readlane_b32 s40, v62, 18 +; SI-NEXT: v_readlane_b32 s41, v62, 19 +; SI-NEXT: s_lshl_b32 s40, s40, 8 +; SI-NEXT: s_and_b32 s28, s28, 0xff +; SI-NEXT: s_or_b32 s28, s28, s40 +; SI-NEXT: v_readlane_b32 s40, v62, 20 +; SI-NEXT: v_readlane_b32 s41, v62, 21 +; SI-NEXT: s_and_b32 s40, s40, 0xff +; SI-NEXT: v_readlane_b32 s42, v62, 22 +; SI-NEXT: s_lshl_b32 s41, s42, 24 +; SI-NEXT: s_lshl_b32 s40, s40, 16 +; SI-NEXT: s_or_b32 s40, s41, s40 +; SI-NEXT: s_and_b32 s28, s28, 0xffff +; SI-NEXT: s_or_b32 s28, s28, s40 +; SI-NEXT: v_mov_b32_e32 v24, s28 +; SI-NEXT: s_and_b32 s28, s29, 0xff +; SI-NEXT: s_lshl_b32 s29, s68, 8 +; SI-NEXT: v_and_b32_e32 v25, 0xff, v56 +; SI-NEXT: s_or_b32 s28, s28, s29 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v38 +; SI-NEXT: s_and_b32 s28, s28, 0xffff +; SI-NEXT: v_or_b32_e32 v25, v26, v25 +; SI-NEXT: v_or_b32_e32 v25, s28, v25 +; SI-NEXT: v_readlane_b32 s28, v62, 24 +; SI-NEXT: v_readlane_b32 s29, v62, 25 +; SI-NEXT: s_lshl_b32 s28, s28, 8 +; SI-NEXT: s_and_b32 s26, s26, 0xff +; SI-NEXT: s_or_b32 s26, s26, s28 +; SI-NEXT: v_readlane_b32 s28, v62, 26 +; 
SI-NEXT: v_readlane_b32 s29, v62, 27 +; SI-NEXT: s_and_b32 s28, s28, 0xff +; SI-NEXT: v_readlane_b32 s40, v62, 28 +; SI-NEXT: s_lshl_b32 s29, s40, 24 +; SI-NEXT: s_lshl_b32 s28, s28, 16 +; SI-NEXT: s_or_b32 s28, s29, s28 +; SI-NEXT: s_and_b32 s26, s26, 0xffff +; SI-NEXT: s_or_b32 s26, s26, s28 +; SI-NEXT: v_mov_b32_e32 v26, s26 +; SI-NEXT: s_and_b32 s26, s27, 0xff +; SI-NEXT: s_lshl_b32 s27, s66, 8 +; SI-NEXT: v_and_b32_e32 v27, 0xff, v46 +; SI-NEXT: s_or_b32 s26, s26, s27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v37 +; SI-NEXT: s_and_b32 s26, s26, 0xffff +; SI-NEXT: v_or_b32_e32 v27, v28, v27 +; SI-NEXT: v_or_b32_e32 v27, s26, v27 +; SI-NEXT: v_readlane_b32 s26, v62, 30 +; SI-NEXT: v_readlane_b32 s27, v62, 31 +; SI-NEXT: s_lshl_b32 s26, s26, 8 +; SI-NEXT: s_and_b32 s24, s24, 0xff +; SI-NEXT: s_or_b32 s24, s24, s26 +; SI-NEXT: v_readlane_b32 s26, v62, 32 +; SI-NEXT: v_readlane_b32 s27, v62, 33 +; SI-NEXT: s_and_b32 s26, s26, 0xff +; SI-NEXT: v_readlane_b32 s28, v62, 34 +; SI-NEXT: s_lshl_b32 s27, s28, 24 +; SI-NEXT: s_lshl_b32 s26, s26, 16 +; SI-NEXT: v_readlane_b32 vcc_hi, v62, 11 +; SI-NEXT: s_or_b32 s26, s27, s26 +; SI-NEXT: s_and_b32 s24, s24, 0xffff +; SI-NEXT: s_or_b32 s24, s24, s26 +; SI-NEXT: buffer_store_dword v16, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v58 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 
v18, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v16, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v28, s24 +; SI-NEXT: s_and_b32 s24, s25, 0xff +; SI-NEXT: s_lshl_b32 s25, s64, 8 +; SI-NEXT: v_and_b32_e32 v29, 0xff, v30 +; SI-NEXT: buffer_store_dword v13, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v60 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v13, vcc, 8, v0 +; SI-NEXT: s_or_b32 s24, s24, s25 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v35 +; SI-NEXT: buffer_store_dword v21, v13, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v13, vcc, 12, v0 +; SI-NEXT: s_and_b32 s24, s24, 0xffff +; SI-NEXT: v_or_b32_e32 v29, v30, v29 +; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v22 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xff, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:260 ; 4-byte 
Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v10 -; SI-NEXT: v_or_b32_e32 v18, v20, v18 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v12, vcc, 16, v0 +; SI-NEXT: v_or_b32_e32 v29, s24, v29 +; SI-NEXT: buffer_store_dword v23, v12, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v12, vcc, 20, v0 +; SI-NEXT: v_readlane_b32 s24, v62, 36 +; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v24 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v10 +; SI-NEXT: v_add_i32_e32 v11, vcc, 24, v0 +; SI-NEXT: v_readlane_b32 s25, v62, 37 +; SI-NEXT: s_lshl_b32 s24, s24, 8 +; SI-NEXT: s_and_b32 s22, s22, 0xff +; SI-NEXT: buffer_store_dword v24, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v11, vcc, 28, v0 +; SI-NEXT: s_or_b32 s22, s22, s24 +; SI-NEXT: v_readlane_b32 s24, v62, 38 +; SI-NEXT: buffer_store_dword v25, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 +; SI-NEXT: v_readlane_b32 s25, v62, 39 +; SI-NEXT: s_and_b32 s24, s24, 0xff +; SI-NEXT: v_readlane_b32 s26, v62, 40 +; SI-NEXT: buffer_store_dword v26, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 +; SI-NEXT: s_lshl_b32 s24, s24, 16 +; SI-NEXT: s_lshl_b32 s25, s26, 24 +; SI-NEXT: buffer_store_dword v27, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 +; SI-NEXT: s_and_b32 s22, s22, 0xffff +; SI-NEXT: s_or_b32 s24, s25, s24 +; SI-NEXT: buffer_store_dword v28, v11, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v11, vcc, 44, v0 +; SI-NEXT: s_or_b32 s22, s22, s24 +; SI-NEXT: buffer_store_dword v29, v11, s[0:3], 0 offen +; 
SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 +; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: s_and_b32 s22, s23, 0xff +; SI-NEXT: s_lshl_b32 s23, s54, 8 ; SI-NEXT: v_and_b32_e32 v10, 0xff, v32 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 +; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s22, s22, s23 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v13 -; SI-NEXT: v_or_b32_e32 v10, v18, v10 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_or_b32_e32 v10, v15, v10 -; SI-NEXT: v_add_i32_e32 v15, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v10, v15, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v10, v10, v15 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v13 -; SI-NEXT: v_or_b32_e32 v15, v18, v15 -; SI-NEXT: v_or_b32_e32 v10, v10, v15 -; SI-NEXT: v_add_i32_e32 v15, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v10, v15, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v43 +; SI-NEXT: s_and_b32 s22, s22, 0xffff +; SI-NEXT: v_or_b32_e32 v10, v11, v10 +; SI-NEXT: v_or_b32_e32 v10, s22, v10 +; SI-NEXT: s_and_b32 s20, s20, 0xff +; SI-NEXT: s_lshl_b32 s22, s62, 8 +; SI-NEXT: s_or_b32 s20, s20, s22 +; SI-NEXT: v_readlane_b32 s22, v62, 42 +; SI-NEXT: v_readlane_b32 s23, v62, 43 +; SI-NEXT: s_and_b32 s22, s22, 0xff +; SI-NEXT: 
v_readlane_b32 s24, v62, 44 +; SI-NEXT: s_lshl_b32 s22, s22, 16 +; SI-NEXT: s_lshl_b32 s23, s24, 24 +; SI-NEXT: s_and_b32 s20, s20, 0xffff +; SI-NEXT: s_or_b32 s22, s23, s22 +; SI-NEXT: v_add_i32_e32 v11, vcc, 52, v0 +; SI-NEXT: s_or_b32 s20, s20, s22 +; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v29 -; SI-NEXT: v_or_b32_e32 v10, v10, v15 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v13 -; SI-NEXT: v_or_b32_e32 v9, v15, v9 +; SI-NEXT: v_add_i32_e32 v10, vcc, 56, v0 +; SI-NEXT: v_mov_b32_e32 v11, s20 +; SI-NEXT: s_and_b32 s20, s21, 0xff +; SI-NEXT: s_lshl_b32 s21, s52, 8 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v31 +; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s20, s20, s21 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v20 +; SI-NEXT: s_and_b32 s20, s20, 0xffff ; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_or_b32_e32 v9, s20, v9 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_lshl_b32 s20, s58, 8 +; SI-NEXT: s_or_b32 s18, s18, s20 +; SI-NEXT: s_and_b32 s20, s98, 0xff +; SI-NEXT: s_lshl_b32 s20, s20, 16 +; SI-NEXT: s_lshl_b32 s21, s96, 24 +; SI-NEXT: s_and_b32 s18, s18, 0xffff +; SI-NEXT: s_or_b32 s20, s21, s20 ; SI-NEXT: v_add_i32_e32 v10, vcc, 60, v0 +; SI-NEXT: s_or_b32 s18, s18, s20 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v9, 0xff, v3 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; 
SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v13 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, v15, v10 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v9, 0xff, v59 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v3 -; SI-NEXT: v_or_b32_e32 v8, v10, v8 +; SI-NEXT: v_add_i32_e32 v9, vcc, 64, v0 +; SI-NEXT: v_mov_b32_e32 v10, s18 +; SI-NEXT: s_and_b32 s18, s19, 0xff +; SI-NEXT: s_lshl_b32 s19, s50, 8 +; SI-NEXT: v_and_b32_e32 v8, 0xff, v54 +; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s18, s18, s19 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v19 +; SI-NEXT: s_and_b32 s18, s18, 0xffff ; SI-NEXT: v_or_b32_e32 v8, v9, v8 +; SI-NEXT: v_or_b32_e32 v8, s18, v8 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s18, s38, 8 +; SI-NEXT: s_or_b32 s16, s16, s18 +; SI-NEXT: s_and_b32 s18, s36, 0xff +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s34, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s18, s19, s18 ; SI-NEXT: v_add_i32_e32 v9, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s18 ; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v49 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v4 -; SI-NEXT: 
buffer_load_dword v4, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v9, 0xff, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v4 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v62 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v4 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v30 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 +; SI-NEXT: v_add_i32_e32 v8, vcc, 0x48, v0 +; SI-NEXT: v_mov_b32_e32 v9, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xff +; SI-NEXT: s_lshl_b32 s17, s48, 8 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v44 +; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v18 +; SI-NEXT: s_and_b32 s16, s16, 0xffff ; SI-NEXT: v_or_b32_e32 v7, v8, v7 +; SI-NEXT: v_or_b32_e32 v7, s16, v7 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: s_lshl_b32 s16, s30, 8 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: s_and_b32 s16, s94, 0xff +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s92, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 ; SI-NEXT: v_add_i32_e32 v8, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 ; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_and_b32_e32 v7, 0xff, v36 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v4 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v8, v9, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v25 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v4 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v3 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v6, v8, v6 +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 +; SI-NEXT: v_mov_b32_e32 v8, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: s_lshl_b32 s15, s67, 8 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v42 +; SI-NEXT: s_and_b32 s14, s14, 0xffff ; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: v_or_b32_e32 v6, s14, v6 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s14, s90, 8 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: s_and_b32 s14, s88, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s78, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 ; SI-NEXT: v_add_i32_e32 v7, vcc, 
0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v35 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v23 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v4 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v3 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 +; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0 +; SI-NEXT: v_mov_b32_e32 v7, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: s_lshl_b32 s13, s65, 8 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v15 +; SI-NEXT: s_and_b32 s12, s12, 0xffff ; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v5, s12, v5 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s12, s76, 8 +; SI-NEXT: s_or_b32 s10, s10, 
s12 +; SI-NEXT: s_and_b32 s12, s74, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s72, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 ; SI-NEXT: v_add_i32_e32 v6, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 ; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v61 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v4 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v3 +; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 +; SI-NEXT: v_mov_b32_e32 v6, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: s_lshl_b32 s11, s55, 8 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v55 +; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen +; 
SI-NEXT: s_or_b32 s10, s10, s11 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v45 +; SI-NEXT: s_and_b32 s10, s10, 0xffff ; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: v_or_b32_e32 v4, s10, v4 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s10, s60, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: s_and_b32 s10, s56, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s46, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_add_i32_e32 v5, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 ; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v40 -; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v56 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: 
s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; SI-NEXT: v_or_b32_e32 v2, v5, v2 +; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s53, 8 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v61 +; SI-NEXT: s_and_b32 s8, s8, 0xffff ; SI-NEXT: v_or_b32_e32 v2, v4, v2 +; SI-NEXT: v_or_b32_e32 v2, s8, v2 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_lshl_b32 s8, s86, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: s_and_b32 s8, s84, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s82, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 ; SI-NEXT: v_add_i32_e32 v4, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 ; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v53 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v38 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v39 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v54 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: 
v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: v_or_b32_e32 v1, v4, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 +; SI-NEXT: v_mov_b32_e32 v4, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: s_lshl_b32 s7, s51, 8 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v47 +; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, s6, v1 +; SI-NEXT: v_readlane_b32 s6, v62, 46 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 8 +; SI-NEXT: v_readlane_b32 s7, v62, 47 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s80, 0xff +; SI-NEXT: v_readlane_b32 s8, v62, 48 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s8, 24 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v42 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v27 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v4, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s49, 8 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v53 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: 
v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v33 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, s4, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded 
Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s45, v62, 17 +; SI-NEXT: v_readlane_b32 s43, v62, 23 +; SI-NEXT: v_readlane_b32 s41, v62, 29 +; SI-NEXT: v_readlane_b32 s29, v62, 35 +; SI-NEXT: v_readlane_b32 s27, v62, 41 +; SI-NEXT: v_readlane_b32 s25, v62, 45 +; SI-NEXT: v_readlane_b32 s9, v62, 49 +; SI-NEXT: v_readlane_b32 s99, v63, 35 +; SI-NEXT: v_readlane_b32 s98, v63, 34 +; SI-NEXT: v_readlane_b32 s97, v63, 33 +; SI-NEXT: v_readlane_b32 s96, v63, 32 +; SI-NEXT: v_readlane_b32 s87, v63, 31 +; SI-NEXT: v_readlane_b32 s86, v63, 30 +; SI-NEXT: v_readlane_b32 s85, v63, 29 +; SI-NEXT: v_readlane_b32 s84, v63, 28 +; SI-NEXT: v_readlane_b32 s83, v63, 27 +; SI-NEXT: v_readlane_b32 s82, v63, 26 +; SI-NEXT: v_readlane_b32 
s81, v63, 25 +; SI-NEXT: v_readlane_b32 s80, v63, 24 +; SI-NEXT: v_readlane_b32 s71, v63, 23 +; SI-NEXT: v_readlane_b32 s70, v63, 22 +; SI-NEXT: v_readlane_b32 s69, v63, 21 +; SI-NEXT: v_readlane_b32 s68, v63, 20 +; SI-NEXT: v_readlane_b32 s67, v63, 19 +; SI-NEXT: v_readlane_b32 s66, v63, 18 +; SI-NEXT: v_readlane_b32 s65, v63, 17 +; SI-NEXT: v_readlane_b32 s64, v63, 16 +; SI-NEXT: v_readlane_b32 s55, v63, 15 +; SI-NEXT: v_readlane_b32 s54, v63, 14 +; SI-NEXT: v_readlane_b32 s53, v63, 13 +; SI-NEXT: v_readlane_b32 s52, v63, 12 +; SI-NEXT: v_readlane_b32 s51, v63, 11 +; SI-NEXT: v_readlane_b32 s50, v63, 10 +; SI-NEXT: v_readlane_b32 s49, v63, 9 +; SI-NEXT: v_readlane_b32 s48, v63, 8 +; SI-NEXT: v_readlane_b32 s39, v63, 7 +; SI-NEXT: v_readlane_b32 s38, v63, 6 +; SI-NEXT: v_readlane_b32 s37, v63, 5 +; SI-NEXT: v_readlane_b32 s36, v63, 4 +; SI-NEXT: v_readlane_b32 s35, v63, 3 +; SI-NEXT: v_readlane_b32 s34, v63, 2 +; SI-NEXT: v_readlane_b32 s31, v63, 1 +; SI-NEXT: v_readlane_b32 s30, v63, 0 +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; @@ -198090,1615 +199680,1736 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 
exec, s[4:5] -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:332 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:328 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:324 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:320 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:308 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:304 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:300 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:296 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:292 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:288 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:276 -; 
SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:272 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:268 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:264 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:256 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:244 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:240 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:332 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:328 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:320 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:316 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:312 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:308 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:304 +; SI-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:236 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:232 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:228 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:224 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:212 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:208 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:200 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:196 -; SI-NEXT: v_writelane_b32 v63, s30, 0 -; SI-NEXT: v_writelane_b32 v63, s31, 1 -; SI-NEXT: v_writelane_b32 v63, s34, 2 -; SI-NEXT: v_writelane_b32 v63, s35, 3 -; SI-NEXT: v_writelane_b32 v63, s36, 4 -; SI-NEXT: 
v_writelane_b32 v63, s37, 5 -; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane -; SI-NEXT: v_writelane_b32 v63, s38, 6 -; SI-NEXT: v_writelane_b32 v62, s28, 0 -; SI-NEXT: v_writelane_b32 v63, s39, 7 -; SI-NEXT: v_writelane_b32 v62, s25, 1 -; SI-NEXT: v_writelane_b32 v63, s48, 8 -; SI-NEXT: v_writelane_b32 v62, s24, 2 -; SI-NEXT: v_writelane_b32 v63, s49, 9 -; SI-NEXT: v_writelane_b32 v62, s23, 3 -; SI-NEXT: v_writelane_b32 v63, s50, 10 -; SI-NEXT: v_writelane_b32 v62, s22, 4 -; SI-NEXT: v_writelane_b32 v63, s51, 11 -; SI-NEXT: v_writelane_b32 v62, s21, 5 -; SI-NEXT: v_writelane_b32 v63, s52, 12 -; SI-NEXT: v_writelane_b32 v62, s18, 6 -; SI-NEXT: v_writelane_b32 v63, s53, 13 -; SI-NEXT: v_writelane_b32 v62, s16, 7 -; SI-NEXT: v_writelane_b32 v63, s54, 14 -; SI-NEXT: v_writelane_b32 v63, s55, 15 -; SI-NEXT: v_writelane_b32 v63, s64, 16 -; SI-NEXT: v_writelane_b32 v63, s65, 17 -; SI-NEXT: v_writelane_b32 v63, s66, 18 -; SI-NEXT: v_writelane_b32 v63, s67, 19 -; SI-NEXT: v_writelane_b32 v63, s68, 20 -; SI-NEXT: v_writelane_b32 v63, s69, 21 -; SI-NEXT: v_writelane_b32 v63, s70, 22 -; SI-NEXT: v_writelane_b32 v63, s71, 23 -; SI-NEXT: v_writelane_b32 v63, s80, 24 -; SI-NEXT: v_writelane_b32 v63, s81, 25 -; SI-NEXT: v_writelane_b32 v63, s82, 26 -; SI-NEXT: v_writelane_b32 v63, s83, 27 -; SI-NEXT: v_writelane_b32 v63, s84, 28 -; SI-NEXT: v_writelane_b32 v63, s85, 29 -; SI-NEXT: v_writelane_b32 v63, s86, 30 -; SI-NEXT: v_writelane_b32 v63, s87, 31 -; SI-NEXT: v_writelane_b32 v63, s96, 32 -; SI-NEXT: v_writelane_b32 v63, s97, 33 -; SI-NEXT: v_writelane_b32 v63, s98, 34 -; SI-NEXT: v_readfirstlane_b32 s15, v16 -; SI-NEXT: v_readfirstlane_b32 s18, v25 -; SI-NEXT: v_readfirstlane_b32 s43, v15 -; SI-NEXT: v_readfirstlane_b32 s42, v24 -; SI-NEXT: v_readfirstlane_b32 s44, v23 -; SI-NEXT: v_readfirstlane_b32 s49, v12 -; SI-NEXT: v_readfirstlane_b32 s8, v11 -; SI-NEXT: v_readfirstlane_b32 s53, v20 -; SI-NEXT: v_readfirstlane_b32 s40, v19 -; SI-NEXT: v_readfirstlane_b32 s23, 
v28 -; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_writelane_b32 v41, s30, 0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_writelane_b32 v43, s29, 0 +; SI-NEXT: v_writelane_b32 v43, s28, 1 +; SI-NEXT: v_writelane_b32 v43, s27, 2 +; SI-NEXT: v_writelane_b32 v43, s26, 3 +; SI-NEXT: v_writelane_b32 v43, s25, 4 +; SI-NEXT: v_writelane_b32 v43, s24, 5 +; SI-NEXT: v_writelane_b32 v43, s23, 6 +; SI-NEXT: v_writelane_b32 v43, s22, 7 +; SI-NEXT: v_writelane_b32 v43, s21, 8 +; SI-NEXT: v_writelane_b32 v43, s20, 9 +; SI-NEXT: v_writelane_b32 v43, s19, 10 +; SI-NEXT: v_writelane_b32 v43, s18, 11 +; SI-NEXT: v_writelane_b32 v43, s17, 12 +; SI-NEXT: v_writelane_b32 v43, s16, 13 +; SI-NEXT: v_writelane_b32 v41, s31, 1 +; SI-NEXT: v_writelane_b32 v41, s34, 2 +; SI-NEXT: v_writelane_b32 v41, s35, 3 +; SI-NEXT: v_writelane_b32 v41, s36, 4 +; SI-NEXT: v_writelane_b32 v41, s37, 5 +; SI-NEXT: v_writelane_b32 v41, s38, 6 +; SI-NEXT: v_writelane_b32 v41, s39, 7 +; SI-NEXT: v_writelane_b32 v41, s48, 8 +; SI-NEXT: v_writelane_b32 v41, s49, 9 +; SI-NEXT: v_writelane_b32 v41, s50, 10 +; SI-NEXT: v_writelane_b32 v41, s51, 11 +; SI-NEXT: v_writelane_b32 v41, s52, 12 +; SI-NEXT: v_writelane_b32 v41, s53, 13 +; SI-NEXT: v_writelane_b32 v41, s54, 14 +; SI-NEXT: v_writelane_b32 v41, s55, 15 +; SI-NEXT: v_writelane_b32 v41, s64, 16 +; SI-NEXT: v_writelane_b32 v41, s65, 17 +; SI-NEXT: v_writelane_b32 v41, s66, 18 +; SI-NEXT: v_writelane_b32 v41, s67, 19 +; SI-NEXT: v_writelane_b32 v41, s68, 20 +; SI-NEXT: v_writelane_b32 v41, s69, 21 +; SI-NEXT: v_writelane_b32 v41, s70, 22 +; SI-NEXT: v_writelane_b32 v41, s71, 23 +; SI-NEXT: v_writelane_b32 v41, s80, 24 +; SI-NEXT: v_writelane_b32 v41, s81, 25 +; SI-NEXT: v_writelane_b32 v41, s82, 26 +; SI-NEXT: v_writelane_b32 v41, s83, 27 +; SI-NEXT: v_writelane_b32 v41, s84, 28 +; SI-NEXT: v_writelane_b32 v41, s85, 29 +; SI-NEXT: v_writelane_b32 v41, s86, 30 +; SI-NEXT: v_writelane_b32 v41, s87, 31 +; SI-NEXT: v_writelane_b32 v41, s96, 32 +; SI-NEXT: 
v_writelane_b32 v41, s97, 33 +; SI-NEXT: v_writelane_b32 v41, s98, 34 +; SI-NEXT: v_readfirstlane_b32 s39, v26 +; SI-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane +; SI-NEXT: v_readfirstlane_b32 s47, v12 +; SI-NEXT: v_writelane_b32 v42, s39, 0 +; SI-NEXT: v_readfirstlane_b32 s56, v11 +; SI-NEXT: v_writelane_b32 v42, s47, 1 +; SI-NEXT: v_readfirstlane_b32 s48, v24 +; SI-NEXT: v_writelane_b32 v42, s56, 2 +; SI-NEXT: v_readfirstlane_b32 s49, v23 +; SI-NEXT: v_writelane_b32 v42, s48, 3 +; SI-NEXT: v_readfirstlane_b32 s50, v21 +; SI-NEXT: v_writelane_b32 v42, s49, 4 +; SI-NEXT: v_readfirstlane_b32 s51, v22 +; SI-NEXT: v_writelane_b32 v42, s50, 5 +; SI-NEXT: v_writelane_b32 v42, s51, 6 +; SI-NEXT: v_readfirstlane_b32 s57, v20 +; SI-NEXT: v_readfirstlane_b32 s58, v19 +; SI-NEXT: v_readfirstlane_b32 s64, v29 +; SI-NEXT: v_readfirstlane_b32 s65, v30 +; SI-NEXT: v_readfirstlane_b32 s59, v28 +; SI-NEXT: v_readfirstlane_b32 s60, v27 +; SI-NEXT: v_readfirstlane_b32 s11, v1 +; SI-NEXT: v_readfirstlane_b32 s12, v2 +; SI-NEXT: v_readfirstlane_b32 s13, v9 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s4, v31 +; SI-NEXT: v_writelane_b32 v43, s4, 14 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:300 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:296 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:292 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:288 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:284 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:280 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s4, v32 +; SI-NEXT: v_writelane_b32 v43, s4, 15 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s4, v33 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:276 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:272 +; SI-NEXT: v_writelane_b32 v43, s4, 16 +; SI-NEXT: s_waitcnt vmcnt(12) ; SI-NEXT: v_readfirstlane_b32 s4, v34 -; SI-NEXT: 
v_writelane_b32 v62, s4, 8 -; SI-NEXT: v_readfirstlane_b32 s4, v38 -; SI-NEXT: v_writelane_b32 v62, s4, 9 +; SI-NEXT: v_writelane_b32 v43, s4, 17 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s4, v35 +; SI-NEXT: v_writelane_b32 v43, s4, 18 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s44, v36 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s90, v37 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:268 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:264 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:260 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:256 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s6, v38 +; SI-NEXT: v_readfirstlane_b32 s14, v10 +; SI-NEXT: v_readfirstlane_b32 s15, v8 +; SI-NEXT: v_readfirstlane_b32 s18, v7 +; SI-NEXT: v_readfirstlane_b32 s21, v5 +; SI-NEXT: v_readfirstlane_b32 s22, v6 +; SI-NEXT: v_readfirstlane_b32 s40, v17 +; SI-NEXT: v_readfirstlane_b32 s41, v18 +; SI-NEXT: v_readfirstlane_b32 s42, v4 +; SI-NEXT: v_readfirstlane_b32 s43, v3 +; SI-NEXT: v_readfirstlane_b32 s76, v16 +; SI-NEXT: v_readfirstlane_b32 s77, v15 +; SI-NEXT: v_readfirstlane_b32 s38, v25 +; SI-NEXT: v_writelane_b32 v41, s99, 35 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s4, v31 +; SI-NEXT: v_writelane_b32 v43, s4, 19 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s4, v39 +; SI-NEXT: v_writelane_b32 v43, s4, 20 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s4, v48 +; SI-NEXT: v_writelane_b32 v43, s4, 21 +; SI-NEXT: s_waitcnt vmcnt(8) ; SI-NEXT: v_readfirstlane_b32 s4, v49 -; SI-NEXT: v_writelane_b32 v62, s4, 10 +; SI-NEXT: v_writelane_b32 v43, s4, 22 +; SI-NEXT: s_waitcnt vmcnt(7) ; SI-NEXT: v_readfirstlane_b32 s4, v50 -; SI-NEXT: v_writelane_b32 v62, s4, 11 -; SI-NEXT: v_readfirstlane_b32 s79, v52 -; SI-NEXT: v_readfirstlane_b32 s88, v54 -; SI-NEXT: v_readfirstlane_b32 s4, v55 -; SI-NEXT: 
buffer_load_dword v38, off, s[0:3], s32 offset:192 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:180 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:176 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:172 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:168 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:164 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:160 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:148 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:144 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:140 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:136 -; SI-NEXT: v_writelane_b32 v62, s4, 12 -; SI-NEXT: v_readfirstlane_b32 s77, v41 -; SI-NEXT: v_readfirstlane_b32 s4, v42 -; SI-NEXT: v_readfirstlane_b32 s94, v31 -; SI-NEXT: v_readfirstlane_b32 s70, v32 -; SI-NEXT: v_readfirstlane_b32 s51, v33 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s37, v45 -; SI-NEXT: v_readfirstlane_b32 s24, v56 -; SI-NEXT: v_readfirstlane_b32 s7, v57 -; SI-NEXT: v_readfirstlane_b32 s92, v58 -; SI-NEXT: v_readfirstlane_b32 s28, v59 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:132 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:116 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:112 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:108 -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:96 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:84 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:80 -; SI-NEXT: v_readfirstlane_b32 s68, v35 -; SI-NEXT: v_readfirstlane_b32 s21, v36 -; SI-NEXT: v_readfirstlane_b32 s87, v37 -; SI-NEXT: 
v_readfirstlane_b32 s67, v39 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:64 -; SI-NEXT: v_readfirstlane_b32 s35, v43 -; SI-NEXT: v_readfirstlane_b32 s55, v46 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s74, v53 -; SI-NEXT: v_readfirstlane_b32 s85, v48 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:40 -; SI-NEXT: v_writelane_b32 v62, s4, 13 -; SI-NEXT: v_readfirstlane_b32 s98, v40 -; SI-NEXT: v_readfirstlane_b32 s69, v51 -; SI-NEXT: v_readfirstlane_b32 s34, v27 -; SI-NEXT: v_writelane_b32 v63, s99, 35 -; SI-NEXT: v_readfirstlane_b32 s97, v26 -; SI-NEXT: v_readfirstlane_b32 s80, v18 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14 -; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v22 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v30 -; SI-NEXT: v_readfirstlane_b32 s96, v17 -; SI-NEXT: v_readfirstlane_b32 s64, v9 -; SI-NEXT: v_readfirstlane_b32 s25, v8 -; SI-NEXT: v_readfirstlane_b32 s83, v7 -; SI-NEXT: v_readfirstlane_b32 s84, v4 -; SI-NEXT: v_readfirstlane_b32 s93, v3 -; SI-NEXT: v_readfirstlane_b32 s76, v1 -; SI-NEXT: v_readfirstlane_b32 s58, v38 -; SI-NEXT: v_readfirstlane_b32 s65, v49 -; SI-NEXT: v_readfirstlane_b32 s62, v54 -; SI-NEXT: v_readfirstlane_b32 s81, v44 -; SI-NEXT: v_readfirstlane_b32 s71, v47 -; SI-NEXT: v_readfirstlane_b32 s38, v60 -; SI-NEXT: v_readfirstlane_b32 s86, v61 -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156 -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:220 -; SI-NEXT: v_readfirstlane_b32 s90, v50 -; SI-NEXT: v_readfirstlane_b32 s31, v52 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s4, v55 -; SI-NEXT: buffer_load_dword v49, 
off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 -; SI-NEXT: v_readfirstlane_b32 s72, v31 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:316 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:72 -; SI-NEXT: v_readfirstlane_b32 s82, v56 -; SI-NEXT: v_readfirstlane_b32 s95, v57 +; SI-NEXT: v_writelane_b32 v43, s4, 23 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s4, v51 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:252 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:248 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:244 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:240 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:236 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:232 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:228 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s91, v32 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s8, v33 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:224 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220 +; SI-NEXT: v_writelane_b32 v43, s4, 24 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s4, v34 +; SI-NEXT: v_writelane_b32 v43, s4, 25 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s4, v35 +; SI-NEXT: v_writelane_b32 v43, s4, 26 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s4, v36 +; SI-NEXT: v_writelane_b32 v43, s4, 27 +; SI-NEXT: s_waitcnt vmcnt(9) +; 
SI-NEXT: v_readfirstlane_b32 s4, v37 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:216 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:212 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:208 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:204 +; SI-NEXT: v_writelane_b32 v43, s4, 28 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s4, v31 +; SI-NEXT: v_writelane_b32 v43, s4, 29 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s89, v38 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s78, v39 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s7, v48 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s82, v49 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s4, v50 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s96, v51 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:200 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:196 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:192 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:188 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:184 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:180 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:176 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s70, v33 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:168 +; SI-NEXT: v_writelane_b32 v43, s4, 30 +; SI-NEXT: v_readfirstlane_b32 s4, v32 +; SI-NEXT: v_writelane_b32 v43, s4, 31 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:164 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:160 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:156 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:152 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s39, v58 -; 
SI-NEXT: v_readfirstlane_b32 s56, v59 -; SI-NEXT: v_readfirstlane_b32 s57, v41 -; SI-NEXT: v_readfirstlane_b32 s36, v42 -; SI-NEXT: v_readfirstlane_b32 s73, v45 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:284 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:252 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:124 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:92 -; SI-NEXT: v_readfirstlane_b32 s16, v34 -; SI-NEXT: v_readfirstlane_b32 s48, v32 -; SI-NEXT: v_readfirstlane_b32 s52, v33 -; SI-NEXT: v_readfirstlane_b32 s47, v35 -; SI-NEXT: v_readfirstlane_b32 s60, v37 -; SI-NEXT: v_readfirstlane_b32 s61, v39 -; SI-NEXT: v_writelane_b32 v62, s4, 14 -; SI-NEXT: v_readfirstlane_b32 s54, v48 -; SI-NEXT: v_readfirstlane_b32 s89, v43 -; SI-NEXT: v_readfirstlane_b32 s50, v53 +; SI-NEXT: v_readfirstlane_b32 s4, v34 +; SI-NEXT: v_writelane_b32 v43, s4, 32 +; SI-NEXT: v_readfirstlane_b32 s9, v35 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_readfirstlane_b32 s4, v37 +; SI-NEXT: v_writelane_b32 v43, s4, 33 +; SI-NEXT: v_readfirstlane_b32 s10, v36 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s4, v31 +; SI-NEXT: v_writelane_b32 v43, s4, 34 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s4, v38 +; SI-NEXT: v_writelane_b32 v43, s4, 35 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s4, v39 +; SI-NEXT: v_writelane_b32 v43, s4, 36 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s69, v48 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s30, v49 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s16, v50 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s36, v51 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:148 +; SI-NEXT: buffer_load_dword 
v32, off, s[0:3], s32 offset:144 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s4, v33 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:136 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:132 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:128 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:124 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:120 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:116 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:336 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:112 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:108 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:104 +; SI-NEXT: v_writelane_b32 v43, s4, 37 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s99, v46 -; SI-NEXT: v_readfirstlane_b32 s78, v49 -; SI-NEXT: v_readfirstlane_b32 s30, v51 -; SI-NEXT: v_readfirstlane_b32 s66, v54 -; SI-NEXT: v_readfirstlane_b32 s91, v40 +; SI-NEXT: v_readfirstlane_b32 s4, v52 +; SI-NEXT: v_writelane_b32 v43, s4, 38 +; SI-NEXT: v_readfirstlane_b32 s4, v53 +; SI-NEXT: v_writelane_b32 v43, s4, 39 +; SI-NEXT: v_readfirstlane_b32 s4, v54 +; SI-NEXT: v_writelane_b32 v43, s4, 40 +; SI-NEXT: v_writelane_b32 v43, s44, 41 +; SI-NEXT: v_writelane_b32 v43, s6, 42 +; SI-NEXT: v_writelane_b32 v43, s7, 43 +; SI-NEXT: v_writelane_b32 v43, s8, 44 +; SI-NEXT: v_writelane_b32 v43, s9, 45 +; SI-NEXT: v_writelane_b32 v43, s10, 46 +; SI-NEXT: v_writelane_b32 v43, s11, 47 +; SI-NEXT: v_writelane_b32 v43, s12, 48 +; SI-NEXT: v_writelane_b32 v43, s13, 49 +; SI-NEXT: v_writelane_b32 v43, s14, 50 +; SI-NEXT: v_writelane_b32 v43, s15, 51 +; SI-NEXT: v_writelane_b32 v43, s18, 52 +; SI-NEXT: v_writelane_b32 v43, s21, 53 +; SI-NEXT: v_writelane_b32 v43, s22, 54 +; SI-NEXT: v_writelane_b32 v43, s40, 55 +; SI-NEXT: v_writelane_b32 v43, s41, 56 +; SI-NEXT: v_writelane_b32 v43, 
s42, 57 +; SI-NEXT: v_writelane_b32 v43, s43, 58 +; SI-NEXT: v_writelane_b32 v43, s76, 59 +; SI-NEXT: v_writelane_b32 v43, s77, 60 +; SI-NEXT: v_readfirstlane_b32 s93, v55 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_readfirstlane_b32 s6, v44 +; SI-NEXT: v_readfirstlane_b32 s95, v40 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s17, v33 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s98, v34 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s23, v35 +; SI-NEXT: v_readfirstlane_b32 s25, v31 +; SI-NEXT: v_readfirstlane_b32 s28, v32 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s26, v36 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s88, v37 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_readfirstlane_b32 s79, v38 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_readfirstlane_b32 s75, v39 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:100 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:88 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:84 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:80 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:68 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v50 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v48 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s24, v49 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s85, v50 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s66, v51 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 
offset:52 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s4, v10 -; SI-NEXT: v_writelane_b32 v62, s4, 15 -; SI-NEXT: v_readfirstlane_b32 s4, v2 -; SI-NEXT: v_writelane_b32 v62, s4, 16 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_readfirstlane_b32 s22, v36 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:312 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:280 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:248 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:216 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:184 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:152 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:120 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:88 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24 -; SI-NEXT: v_writelane_b32 v62, s17, 17 -; SI-NEXT: v_writelane_b32 v62, s15, 18 -; SI-NEXT: v_writelane_b32 v62, s18, 19 -; SI-NEXT: v_writelane_b32 v62, s43, 20 -; SI-NEXT: v_writelane_b32 v62, s42, 21 -; SI-NEXT: v_writelane_b32 v62, s44, 22 -; SI-NEXT: v_writelane_b32 v62, s16, 23 -; SI-NEXT: v_writelane_b32 v62, s49, 24 -; SI-NEXT: v_writelane_b32 v62, s8, 25 -; SI-NEXT: v_writelane_b32 v62, s6, 26 -; SI-NEXT: v_readfirstlane_b32 s45, v52 -; SI-NEXT: v_writelane_b32 v62, s56, 27 -; SI-NEXT: v_writelane_b32 v62, s45, 28 -; SI-NEXT: v_writelane_b32 v62, s53, 29 -; SI-NEXT: v_writelane_b32 v62, s94, 30 -; SI-NEXT: v_writelane_b32 v62, s57, 31 -; SI-NEXT: v_writelane_b32 v62, s58, 32 -; SI-NEXT: v_writelane_b32 v62, s47, 33 -; SI-NEXT: v_readfirstlane_b32 s46, v55 -; SI-NEXT: v_writelane_b32 v62, s40, 34 -; SI-NEXT: v_readfirstlane_b32 s59, v47 -; SI-NEXT: v_writelane_b32 v62, s46, 35 -; SI-NEXT: v_writelane_b32 v62, s59, 36 -; SI-NEXT: v_writelane_b32 v62, s60, 37 -; SI-NEXT: v_writelane_b32 v62, s36, 38 -; SI-NEXT: v_writelane_b32 v62, s65, 
39 -; SI-NEXT: v_writelane_b32 v62, s61, 40 -; SI-NEXT: v_writelane_b32 v62, s73, 41 -; SI-NEXT: v_writelane_b32 v62, s62, 42 -; SI-NEXT: v_writelane_b32 v62, s72, 43 -; SI-NEXT: v_writelane_b32 v62, s23, 44 -; SI-NEXT: v_writelane_b32 v62, s48, 45 -; SI-NEXT: v_writelane_b32 v62, s34, 46 -; SI-NEXT: v_writelane_b32 v62, s78, 47 -; SI-NEXT: v_writelane_b32 v62, s30, 48 -; SI-NEXT: v_writelane_b32 v62, s54, 49 -; SI-NEXT: v_writelane_b32 v62, s50, 50 -; SI-NEXT: v_writelane_b32 v62, s52, 51 -; SI-NEXT: v_writelane_b32 v62, s82, 52 -; SI-NEXT: v_writelane_b32 v62, s66, 53 +; SI-NEXT: v_readfirstlane_b32 vcc_lo, v13 +; SI-NEXT: v_readfirstlane_b32 vcc_hi, v14 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 61 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 62 +; SI-NEXT: v_writelane_b32 v43, s38, 63 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v57 +; SI-NEXT: v_readfirstlane_b32 s20, v31 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v58 +; SI-NEXT: v_readfirstlane_b32 s19, v32 ; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v59 -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v56 -; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v60 -; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v45 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v61 -; SI-NEXT: v_lshlrev_b32_e32 v46, 24, v42 -; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v41 -; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v31 -; SI-NEXT: v_writelane_b32 v62, s91, 54 -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: v_readfirstlane_b32 s27, v33 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s94, v34 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s72, v35 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s73, v36 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s67, v37 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: 
v_readfirstlane_b32 s71, v38 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_readfirstlane_b32 s97, v39 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:20 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB97_4 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s5, v62, 5 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_writelane_b32 v62, s4, 55 -; SI-NEXT: v_readlane_b32 s4, v62, 4 -; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: v_readlane_b32 s5, v62, 3 -; SI-NEXT: s_lshl_b32 s4, s4, 16 -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_or_b32 s63, s5, s4 -; SI-NEXT: v_readlane_b32 s4, v62, 6 -; SI-NEXT: s_and_b32 s5, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s19, 24 -; SI-NEXT: v_readlane_b32 s4, v62, 0 -; SI-NEXT: s_or_b32 s9, s9, s5 -; SI-NEXT: s_and_b32 s5, s4, 0xff -; SI-NEXT: s_lshl_b32 s10, s29, 8 -; SI-NEXT: s_or_b32 s4, s5, s10 -; SI-NEXT: v_writelane_b32 v62, s4, 56 -; SI-NEXT: s_and_b32 s5, s76, 0xff -; SI-NEXT: v_readlane_b32 s10, v62, 16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s11, s10, 24 -; SI-NEXT: s_or_b32 s5, s11, s5 -; SI-NEXT: s_and_b32 s11, s26, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s12, s27, 24 -; SI-NEXT: s_or_b32 s14, s12, s11 -; SI-NEXT: s_and_b32 s11, s83, 0xff -; SI-NEXT: s_lshl_b32 
s12, s25, 8 -; SI-NEXT: s_or_b32 s10, s11, s12 -; SI-NEXT: v_writelane_b32 v62, s10, 57 -; SI-NEXT: s_and_b32 s11, s64, 0xff -; SI-NEXT: v_readlane_b32 s10, v62, 15 -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s13, s10, 24 -; SI-NEXT: s_or_b32 s41, s13, s11 -; SI-NEXT: s_and_b32 s11, s43, 0xff -; SI-NEXT: s_lshl_b32 s13, s15, 8 -; SI-NEXT: s_or_b32 s10, s11, s13 -; SI-NEXT: s_and_b32 s11, s96, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s80, 24 -; SI-NEXT: s_or_b32 s43, s15, s11 -; SI-NEXT: s_and_b32 s11, s44, 0xff -; SI-NEXT: s_lshl_b32 s15, s42, 8 -; SI-NEXT: s_or_b32 s13, s11, s15 -; SI-NEXT: s_and_b32 s11, s18, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s97, 24 -; SI-NEXT: s_or_b32 s44, s15, s11 -; SI-NEXT: s_and_b32 s11, s59, 0xff -; SI-NEXT: s_lshl_b32 s15, s46, 8 -; SI-NEXT: s_or_b32 s12, s11, s15 -; SI-NEXT: s_and_b32 s11, s45, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s6, 24 -; SI-NEXT: s_or_b32 s45, s15, s11 -; SI-NEXT: s_and_b32 s11, s30, 0xff -; SI-NEXT: s_lshl_b32 s15, s78, 8 -; SI-NEXT: v_writelane_b32 v62, s10, 58 -; SI-NEXT: s_or_b32 s10, s11, s15 -; SI-NEXT: s_and_b32 s11, s99, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s89, 24 -; SI-NEXT: s_or_b32 s46, s15, s11 -; SI-NEXT: s_and_b32 s11, s61, 0xff -; SI-NEXT: s_lshl_b32 s15, s60, 8 -; SI-NEXT: s_or_b32 s6, s11, s15 -; SI-NEXT: s_and_b32 s11, s22, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s47, 24 -; SI-NEXT: s_or_b32 s47, s15, s11 -; SI-NEXT: s_and_b32 s11, s57, 0xff -; SI-NEXT: s_lshl_b32 s15, s56, 8 -; SI-NEXT: v_writelane_b32 v62, s6, 59 -; SI-NEXT: s_or_b32 s6, s11, s15 -; SI-NEXT: s_and_b32 s11, s39, 0xff -; SI-NEXT: v_writelane_b32 v62, s6, 60 -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s95, 24 -; SI-NEXT: s_or_b32 s56, s15, s11 -; SI-NEXT: s_and_b32 s11, s48, 0xff -; SI-NEXT: s_lshl_b32 s15, s72, 8 -; SI-NEXT: v_readlane_b32 s6, 
v62, 14 -; SI-NEXT: s_or_b32 s48, s11, s15 -; SI-NEXT: s_and_b32 s11, s6, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s31, 24 -; SI-NEXT: s_or_b32 vcc_lo, s15, s11 -; SI-NEXT: s_and_b32 s11, s86, 0xff -; SI-NEXT: s_lshl_b32 s15, s38, 8 -; SI-NEXT: s_or_b32 s72, s11, s15 -; SI-NEXT: s_and_b32 s11, s71, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s81, 24 -; SI-NEXT: s_or_b32 vcc_hi, s15, s11 -; SI-NEXT: s_and_b32 s11, s58, 0xff -; SI-NEXT: s_lshl_b32 s15, s85, 8 -; SI-NEXT: s_or_b32 s57, s11, s15 -; SI-NEXT: s_and_b32 s11, s69, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s74, 24 -; SI-NEXT: v_writelane_b32 v62, s74, 61 -; SI-NEXT: s_or_b32 s74, s15, s11 -; SI-NEXT: s_and_b32 s11, s87, 0xff -; SI-NEXT: s_lshl_b32 s15, s21, 8 -; SI-NEXT: s_or_b32 s58, s11, s15 -; SI-NEXT: s_and_b32 s11, s68, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s28, 24 -; SI-NEXT: s_or_b32 s75, s15, s11 -; SI-NEXT: s_and_b32 s11, s24, 0xff -; SI-NEXT: s_lshl_b32 s15, s55, 8 -; SI-NEXT: v_writelane_b32 v62, s25, 62 -; SI-NEXT: s_or_b32 s59, s11, s15 -; SI-NEXT: s_and_b32 s11, s37, 0xff -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s51, 24 -; SI-NEXT: v_readlane_b32 s4, v62, 13 -; SI-NEXT: s_mov_b32 s18, s21 -; SI-NEXT: s_mov_b32 s21, s97 -; SI-NEXT: s_mov_b32 s97, s37 -; SI-NEXT: s_mov_b32 s37, s76 -; SI-NEXT: s_or_b32 s76, s15, s11 -; SI-NEXT: s_and_b32 s11, s35, 0xff -; SI-NEXT: s_lshl_b32 s15, s4, 8 -; SI-NEXT: s_or_b32 s60, s11, s15 -; SI-NEXT: s_and_b32 s11, s77, 0xff -; SI-NEXT: v_readlane_b32 s4, v62, 12 -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s4, 24 -; SI-NEXT: v_readlane_b32 s4, v62, 11 -; SI-NEXT: s_mov_b32 s6, s95 -; SI-NEXT: s_mov_b32 s95, s39 -; SI-NEXT: s_mov_b32 s39, s89 -; SI-NEXT: s_mov_b32 s89, s99 -; SI-NEXT: s_mov_b32 s99, s83 -; SI-NEXT: s_mov_b32 s83, s55 -; SI-NEXT: s_mov_b32 s55, s64 -; SI-NEXT: s_mov_b32 s64, s35 -; SI-NEXT: 
s_mov_b32 s35, s77 -; SI-NEXT: s_or_b32 s77, s15, s11 -; SI-NEXT: s_and_b32 s11, s4, 0xff -; SI-NEXT: v_readlane_b32 s4, v62, 10 -; SI-NEXT: s_lshl_b32 s15, s4, 8 -; SI-NEXT: v_readlane_b32 s4, v62, 9 -; SI-NEXT: s_or_b32 s61, s11, s15 -; SI-NEXT: s_and_b32 s11, s4, 0xff -; SI-NEXT: v_readlane_b32 s4, v62, 8 -; SI-NEXT: s_lshl_b32 s11, s11, 16 -; SI-NEXT: s_lshl_b32 s15, s4, 24 -; SI-NEXT: s_or_b32 s78, s15, s11 -; SI-NEXT: v_readlane_b32 s11, v62, 7 -; SI-NEXT: s_and_b32 s11, s11, 0xff -; SI-NEXT: s_lshl_b32 s15, s17, 8 -; SI-NEXT: s_or_b32 s11, s11, s15 -; SI-NEXT: s_and_b32 s11, s11, 0xffff -; SI-NEXT: v_mov_b32_e32 v51, s9 -; SI-NEXT: s_or_b32 s17, s11, s9 -; SI-NEXT: v_readlane_b32 s9, v62, 2 -; SI-NEXT: v_readlane_b32 s11, v62, 1 -; SI-NEXT: s_and_b32 s9, s9, 0xff -; SI-NEXT: s_lshl_b32 s15, s11, 8 -; SI-NEXT: s_or_b32 s9, s9, s15 -; SI-NEXT: s_and_b32 s9, s9, 0xffff -; SI-NEXT: s_mov_b32 s4, s96 -; SI-NEXT: s_mov_b32 s96, s24 -; SI-NEXT: v_mov_b32_e32 v52, s14 -; SI-NEXT: s_or_b32 s24, s9, s14 -; SI-NEXT: s_and_b32 s14, s93, 0xff -; SI-NEXT: s_lshl_b32 s15, s84, 8 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v53, v6, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v47, s14, v53 -; SI-NEXT: s_and_b32 s14, s8, 0xff -; SI-NEXT: s_lshl_b32 s15, s49, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v13 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v54, v14, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v45, s14, v54 -; SI-NEXT: s_and_b32 s14, s40, 0xff -; SI-NEXT: s_lshl_b32 s15, s53, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v21 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v55, v18, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v11, s14, v55 -; SI-NEXT: s_and_b32 s14, s34, 0xff -; 
SI-NEXT: s_lshl_b32 s15, s23, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v29 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v40, v19, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v50, s14, v40 -; SI-NEXT: s_and_b32 s14, s91, 0xff -; SI-NEXT: s_lshl_b32 s15, s66, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v5 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v41, v20, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v17, s14, v41 -; SI-NEXT: s_and_b32 s14, s50, 0xff -; SI-NEXT: s_lshl_b32 s15, s54, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v32 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v42, v22, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v16, s14, v42 -; SI-NEXT: s_and_b32 s14, s73, 0xff -; SI-NEXT: s_lshl_b32 s15, s36, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v33 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v59, v23, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v15, s14, v59 -; SI-NEXT: s_and_b32 s14, s82, 0xff -; SI-NEXT: s_lshl_b32 s15, s52, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v34 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v60, v24, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v12, s14, v60 -; SI-NEXT: s_and_b32 s14, s90, 0xff -; SI-NEXT: s_lshl_b32 s15, s16, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v35 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v2, v25, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v10, s14, v2 -; SI-NEXT: s_and_b32 s14, s62, 0xff -; SI-NEXT: s_lshl_b32 s15, s65, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v36 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v61, 
v26, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v8, s14, v61 -; SI-NEXT: s_and_b32 s14, s98, 0xff -; SI-NEXT: s_lshl_b32 s15, s67, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v37 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_or_b32_e32 v6, v27, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: v_or_b32_e32 v7, s14, v6 -; SI-NEXT: s_and_b32 s14, s92, 0xff -; SI-NEXT: s_lshl_b32 s15, s7, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v38 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_readlane_b32 s8, v62, 55 -; SI-NEXT: v_mov_b32_e32 v28, v38 -; SI-NEXT: v_mov_b32_e32 v38, v25 -; SI-NEXT: v_mov_b32_e32 v25, v35 -; SI-NEXT: v_mov_b32_e32 v35, v22 -; SI-NEXT: v_mov_b32_e32 v22, v32 -; SI-NEXT: v_mov_b32_e32 v32, v18 -; SI-NEXT: v_mov_b32_e32 v18, v13 -; SI-NEXT: v_or_b32_e32 v13, v46, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: s_and_b32 s8, s8, 0xffff -; SI-NEXT: v_or_b32_e32 v4, s14, v13 -; SI-NEXT: s_and_b32 s14, s70, 0xff -; SI-NEXT: s_lshl_b32 s15, s94, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v39 -; SI-NEXT: s_or_b32 s42, s8, s63 -; SI-NEXT: v_readlane_b32 s8, v62, 56 -; SI-NEXT: s_or_b32 s14, s14, s15 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s8, s8, 0xffff -; SI-NEXT: v_mov_b32_e32 v48, v27 -; SI-NEXT: v_mov_b32_e32 v27, v37 -; SI-NEXT: v_mov_b32_e32 v37, v24 -; SI-NEXT: v_mov_b32_e32 v24, v34 -; SI-NEXT: v_mov_b32_e32 v34, v20 -; SI-NEXT: v_mov_b32_e32 v20, v19 -; SI-NEXT: v_mov_b32_e32 v19, v29 -; SI-NEXT: v_or_b32_e32 v29, v30, v1 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: s_or_b32 s40, s8, s5 -; SI-NEXT: v_readlane_b32 s8, v62, 57 -; SI-NEXT: v_or_b32_e32 v31, s14, v29 -; SI-NEXT: s_and_b32 s14, s88, 0xff -; SI-NEXT: s_lshl_b32 s15, s79, 8 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v43 -; SI-NEXT: s_and_b32 s8, s8, 0xffff -; SI-NEXT: v_readlane_b32 s9, v62, 60 -; SI-NEXT: s_or_b32 s14, s14, s15 -; 
SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_or_b32 s15, s8, s41 -; SI-NEXT: v_readlane_b32 s8, v62, 58 -; SI-NEXT: s_and_b32 s16, s9, 0xffff -; SI-NEXT: v_mov_b32_e32 v9, v30 -; SI-NEXT: v_mov_b32_e32 v30, v39 -; SI-NEXT: v_mov_b32_e32 v39, v26 -; SI-NEXT: v_mov_b32_e32 v26, v36 -; SI-NEXT: v_mov_b32_e32 v36, v23 -; SI-NEXT: v_mov_b32_e32 v23, v33 -; SI-NEXT: v_mov_b32_e32 v33, v5 -; SI-NEXT: v_mov_b32_e32 v5, v14 -; SI-NEXT: v_or_b32_e32 v14, v44, v3 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: s_and_b32 s8, s8, 0xffff -; SI-NEXT: s_or_b32 s36, s16, s56 -; SI-NEXT: s_and_b32 s16, s48, 0xffff -; SI-NEXT: v_or_b32_e32 v49, s14, v14 -; SI-NEXT: s_or_b32 s14, s8, s43 -; SI-NEXT: s_and_b32 s8, s13, 0xffff -; SI-NEXT: s_or_b32 s53, s16, vcc_lo -; SI-NEXT: s_and_b32 s16, s72, 0xffff -; SI-NEXT: s_or_b32 s13, s8, s44 -; SI-NEXT: s_and_b32 s8, s12, 0xffff -; SI-NEXT: s_or_b32 s94, s16, vcc_hi -; SI-NEXT: s_and_b32 s16, s57, 0xffff -; SI-NEXT: s_or_b32 s12, s8, s45 -; SI-NEXT: s_and_b32 s8, s10, 0xffff -; SI-NEXT: s_or_b32 s49, s16, s74 -; SI-NEXT: s_and_b32 s16, s58, 0xffff -; SI-NEXT: s_or_b32 s10, s8, s46 -; SI-NEXT: v_readlane_b32 s8, v62, 59 -; SI-NEXT: s_or_b32 s48, s16, s75 -; SI-NEXT: s_and_b32 s16, s59, 0xffff -; SI-NEXT: s_and_b32 s8, s8, 0xffff -; SI-NEXT: s_or_b32 s11, s16, s76 -; SI-NEXT: s_and_b32 s16, s60, 0xffff -; SI-NEXT: s_and_b32 s23, s61, 0xffff -; SI-NEXT: s_mov_b32 s30, s87 -; SI-NEXT: s_mov_b32 s87, s85 -; SI-NEXT: s_or_b32 s8, s8, s47 -; SI-NEXT: s_or_b32 s9, s16, s77 -; SI-NEXT: s_or_b32 s16, s23, s78 -; SI-NEXT: v_alignbit_b32 v57, s42, v51, 16 -; SI-NEXT: v_alignbit_b32 v58, s40, v52, 16 -; SI-NEXT: v_alignbit_b32 v56, s15, v53, 16 -; SI-NEXT: v_alignbit_b32 v51, s14, v54, 16 -; SI-NEXT: v_alignbit_b32 v1, s13, v55, 16 -; SI-NEXT: v_alignbit_b32 v3, s12, v40, 16 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v44, s10, v41, 16 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v43, s8, v42, 16 -; SI-NEXT: 
v_alignbit_b32 v42, s36, v59, 16 -; SI-NEXT: v_alignbit_b32 v41, s53, v60, 16 -; SI-NEXT: v_alignbit_b32 v40, s94, v2, 16 -; SI-NEXT: v_mov_b32_e32 v2, v4 -; SI-NEXT: v_mov_b32_e32 v4, v7 -; SI-NEXT: v_mov_b32_e32 v7, v8 -; SI-NEXT: v_mov_b32_e32 v8, v10 -; SI-NEXT: v_mov_b32_e32 v10, v15 -; SI-NEXT: v_mov_b32_e32 v15, v17 -; SI-NEXT: v_mov_b32_e32 v17, v11 -; SI-NEXT: v_alignbit_b32 v55, s49, v61, 16 -; SI-NEXT: v_alignbit_b32 v54, s48, v6, 16 -; SI-NEXT: v_alignbit_b32 v53, s11, v13, 16 -; SI-NEXT: v_mov_b32_e32 v13, v18 -; SI-NEXT: v_mov_b32_e32 v18, v32 -; SI-NEXT: v_mov_b32_e32 v32, v22 -; SI-NEXT: v_mov_b32_e32 v22, v35 -; SI-NEXT: v_mov_b32_e32 v35, v25 -; SI-NEXT: v_mov_b32_e32 v25, v38 -; SI-NEXT: v_mov_b32_e32 v38, v28 -; SI-NEXT: v_alignbit_b32 v52, s9, v29, 16 -; SI-NEXT: v_mov_b32_e32 v29, v19 -; SI-NEXT: v_mov_b32_e32 v19, v20 -; SI-NEXT: v_mov_b32_e32 v20, v34 -; SI-NEXT: v_mov_b32_e32 v34, v24 -; SI-NEXT: v_mov_b32_e32 v24, v37 -; SI-NEXT: v_mov_b32_e32 v37, v27 -; SI-NEXT: v_mov_b32_e32 v27, v48 -; SI-NEXT: v_alignbit_b32 v28, s16, v14, 16 -; SI-NEXT: v_mov_b32_e32 v14, v5 -; SI-NEXT: v_mov_b32_e32 v5, v33 -; SI-NEXT: v_mov_b32_e32 v33, v23 -; SI-NEXT: v_mov_b32_e32 v23, v36 -; SI-NEXT: v_mov_b32_e32 v36, v26 -; SI-NEXT: v_mov_b32_e32 v26, v39 -; SI-NEXT: v_mov_b32_e32 v39, v30 -; SI-NEXT: v_mov_b32_e32 v30, v9 -; SI-NEXT: v_mov_b32_e32 v9, v12 -; SI-NEXT: v_mov_b32_e32 v12, v16 -; SI-NEXT: v_mov_b32_e32 v16, v50 -; SI-NEXT: s_lshr_b32 s73, s63, 16 -; SI-NEXT: s_lshr_b32 s72, s5, 16 -; SI-NEXT: s_lshr_b32 s63, s41, 16 -; SI-NEXT: s_lshr_b32 s62, s43, 16 -; SI-NEXT: s_lshr_b32 s61, s44, 16 -; SI-NEXT: s_lshr_b32 s60, s45, 16 -; SI-NEXT: s_lshr_b32 s59, s46, 16 -; SI-NEXT: s_lshr_b32 s58, s47, 16 -; SI-NEXT: s_lshr_b32 s57, s56, 16 -; SI-NEXT: s_lshr_b32 s56, vcc_lo, 16 -; SI-NEXT: s_lshr_b32 s47, vcc_hi, 16 -; SI-NEXT: s_lshr_b32 s46, s74, 16 -; SI-NEXT: v_readlane_b32 s25, v62, 62 -; SI-NEXT: v_readlane_b32 s74, v62, 61 -; SI-NEXT: s_lshr_b32 s45, 
s75, 16 -; SI-NEXT: s_lshr_b32 s44, s76, 16 -; SI-NEXT: s_mov_b32 s76, s37 -; SI-NEXT: s_mov_b32 s37, s97 -; SI-NEXT: s_mov_b32 s97, s21 -; SI-NEXT: s_mov_b32 s21, s18 -; SI-NEXT: s_mov_b32 s18, s17 -; SI-NEXT: s_mov_b32 s85, s87 -; SI-NEXT: s_mov_b32 s87, s30 -; SI-NEXT: s_mov_b32 s17, s24 -; SI-NEXT: s_lshr_b32 s43, s77, 16 -; SI-NEXT: s_mov_b32 s77, s35 -; SI-NEXT: s_mov_b32 s35, s64 -; SI-NEXT: s_mov_b32 s64, s55 -; SI-NEXT: s_mov_b32 s55, s83 -; SI-NEXT: s_mov_b32 s83, s99 -; SI-NEXT: s_mov_b32 s99, s89 -; SI-NEXT: s_mov_b32 s89, s39 -; SI-NEXT: s_mov_b32 s39, s95 -; SI-NEXT: s_mov_b32 s95, s6 -; SI-NEXT: s_lshr_b32 s41, s78, 16 -; SI-NEXT: s_mov_b32 s24, s96 -; SI-NEXT: s_mov_b32 s96, s4 -; SI-NEXT: s_cbranch_execnz .LBB97_3 -; SI-NEXT: .LBB97_2: ; %cmp.true -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: s_add_i32 s4, s88, 3 -; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s79, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_readlane_b32 s5, v62, 10 -; SI-NEXT: v_readlane_b32 s6, v62, 9 -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s8, s6, 3 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_add_i32 s70, s70, 3 -; SI-NEXT: v_readlane_b32 s6, v62, 30 -; SI-NEXT: s_add_i32 s9, s77, 3 -; SI-NEXT: s_and_b32 s9, s9, 0xff -; SI-NEXT: s_lshl_b32 s9, s9, 16 -; SI-NEXT: s_add_i32 s79, s92, 3 -; SI-NEXT: s_add_i32 s52, s98, 3 +; SI-NEXT: v_readfirstlane_b32 s35, v48 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s83, v49 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s87, v50 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s63, v51 +; SI-NEXT: 
buffer_load_dword v39, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v37 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v4, v27, v4 -; SI-NEXT: s_add_i32 s30, s87, 3 +; SI-NEXT: v_readfirstlane_b32 s74, v31 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v36 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v26, v7 -; SI-NEXT: s_add_i32 s50, s90, 3 +; SI-NEXT: v_readfirstlane_b32 s81, v32 ; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v35 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v8, v25, v8 -; SI-NEXT: s_add_i32 s94, s86, 3 +; SI-NEXT: v_readfirstlane_b32 s80, v33 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v34 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v9, v24, v9 +; SI-NEXT: v_readfirstlane_b32 s86, v34 ; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v33 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, v23, v10 +; SI-NEXT: v_readfirstlane_b32 s34, v35 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_add_i32_e32 v11, vcc, 3, v32 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, v22, v11 +; SI-NEXT: v_readfirstlane_b32 s84, v36 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v5, v20, v5 +; SI-NEXT: 
v_readfirstlane_b32 s31, v37 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_readfirstlane_b32 s61, v38 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_readfirstlane_b32 s62, v39 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_readfirstlane_b32 s53, v48 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_readlane_b32 s4, v62, 11 -; SI-NEXT: s_add_i32 s4, s4, 3 -; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 8 -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s5, s70, 0xff -; SI-NEXT: s_lshl_b32 s8, s6, 8 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v39 -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_addk_i32 s5, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_and_b32 s5, s5, 0xffff -; SI-NEXT: v_or_b32_e32 v2, v30, v2 -; SI-NEXT: v_or_b32_e32 v2, s5, v2 -; SI-NEXT: s_add_i32 s5, s35, 3 -; SI-NEXT: v_readlane_b32 s6, v62, 13 -; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: s_lshl_b32 s8, s6, 8 -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: v_readlane_b32 s6, v62, 12 -; SI-NEXT: s_lshl_b32 s8, s6, 24 -; SI-NEXT: s_addk_i32 s5, 0x300 -; SI-NEXT: s_or_b32 s8, s8, s9 -; SI-NEXT: s_and_b32 s5, s5, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: s_add_i32 s16, s4, 0x3000000 -; SI-NEXT: v_add_i32_e32 v31, vcc, 0x3000000, v2 -; SI-NEXT: s_add_i32 s9, s5, 0x3000000 -; SI-NEXT: s_and_b32 s4, s79, 0xff -; SI-NEXT: s_lshl_b32 s5, s7, 8 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v38 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_and_b32 
s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v2, v46, v2 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_add_i32 s4, s24, 3 +; SI-NEXT: v_readfirstlane_b32 s52, v49 +; SI-NEXT: v_writelane_b32 v42, s52, 7 +; SI-NEXT: v_writelane_b32 v42, s53, 8 +; SI-NEXT: v_writelane_b32 v42, s57, 9 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_readfirstlane_b32 s54, v50 +; SI-NEXT: v_writelane_b32 v42, s58, 10 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_readfirstlane_b32 s55, v51 +; SI-NEXT: v_writelane_b32 v42, s54, 11 +; SI-NEXT: v_writelane_b32 v42, s55, 12 +; SI-NEXT: v_writelane_b32 v42, s64, 13 +; SI-NEXT: v_writelane_b32 v42, s65, 14 +; SI-NEXT: v_writelane_b32 v42, s67, 15 +; SI-NEXT: v_writelane_b32 v42, s71, 16 +; SI-NEXT: v_writelane_b32 v42, s80, 17 +; SI-NEXT: v_writelane_b32 v42, s81, 18 +; SI-NEXT: v_writelane_b32 v42, s59, 19 +; SI-NEXT: v_writelane_b32 v42, s60, 20 +; SI-NEXT: v_writelane_b32 v42, s86, 21 +; SI-NEXT: v_writelane_b32 v42, s97, 22 +; SI-NEXT: v_writelane_b32 v42, s34, 23 +; SI-NEXT: v_writelane_b32 v42, s66, 24 +; SI-NEXT: v_writelane_b32 v42, s85, 25 +; SI-NEXT: v_writelane_b32 v42, s31, 26 +; SI-NEXT: v_writelane_b32 v42, s84, 27 +; SI-NEXT: v_writelane_b32 v42, s35, 28 +; SI-NEXT: v_writelane_b32 v42, s98, 29 +; SI-NEXT: v_writelane_b32 v42, s17, 30 +; SI-NEXT: v_writelane_b32 v42, s20, 31 +; SI-NEXT: v_writelane_b32 v42, s61, 32 +; SI-NEXT: v_writelane_b32 v42, s19, 33 +; SI-NEXT: v_writelane_b32 v42, s62, 34 +; SI-NEXT: v_writelane_b32 v42, s23, 35 +; SI-NEXT: v_writelane_b32 v42, s83, 36 +; SI-NEXT: v_writelane_b32 v42, s87, 37 +; SI-NEXT: v_writelane_b32 v42, s26, 38 +; SI-NEXT: v_writelane_b32 v42, s94, 39 +; SI-NEXT: v_writelane_b32 v42, s27, 40 +; SI-NEXT: v_writelane_b32 v42, s63, 41 +; SI-NEXT: v_writelane_b32 v42, s79, 42 +; SI-NEXT: v_writelane_b32 v42, s88, 43 +; SI-NEXT: v_writelane_b32 v42, s72, 44 +; SI-NEXT: v_writelane_b32 v42, s73, 45 +; SI-NEXT: v_writelane_b32 v42, s74, 46 +; SI-NEXT: v_writelane_b32 v42, s75, 47 +; SI-NEXT: 
v_writelane_b32 v42, s24, 48 +; SI-NEXT: v_writelane_b32 v42, s25, 49 +; SI-NEXT: v_writelane_b32 v42, s28, 50 +; SI-NEXT: s_cbranch_scc0 .LBB97_4 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: v_readlane_b32 s4, v43, 13 +; SI-NEXT: v_readlane_b32 s5, v43, 12 ; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s55, 8 -; SI-NEXT: s_add_i32 s8, s37, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s51, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s11, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s52, 0xff -; SI-NEXT: s_lshl_b32 s5, s67, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s30, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 -; SI-NEXT: s_add_i32 s8, s68, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s28, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s48, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 42 -; SI-NEXT: s_add_i32 s87, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 39 -; SI-NEXT: s_and_b32 s4, s87, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: v_readlane_b32 s4, v62, 32 -; SI-NEXT: s_add_i32 s67, s4, 3 -; SI-NEXT: s_and_b32 s4, s67, 0xff -; SI-NEXT: s_lshl_b32 s5, s85, 8 -; SI-NEXT: s_add_i32 s8, s69, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s74, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; 
SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 23 -; SI-NEXT: s_add_i32 s49, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s50, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s94, 0xff -; SI-NEXT: s_lshl_b32 s5, s38, 8 -; SI-NEXT: s_add_i32 s8, s71, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s81, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s94, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 52 -; SI-NEXT: s_add_i32 s18, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 51 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: v_readlane_b32 s4, v62, 45 -; SI-NEXT: s_add_i32 s98, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 43 -; SI-NEXT: v_readlane_b32 s6, v62, 14 -; SI-NEXT: s_and_b32 s4, s98, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s8, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s31, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s53, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 41 -; SI-NEXT: s_add_i32 s86, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 38 -; SI-NEXT: s_and_b32 s4, s86, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: v_readlane_b32 s4, v62, 
31 -; SI-NEXT: s_add_i32 s66, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 27 -; SI-NEXT: s_and_b32 s4, s66, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s37, s39, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s37, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s95, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s36, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 50 -; SI-NEXT: s_add_i32 s21, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 49 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: v_readlane_b32 s4, v62, 40 -; SI-NEXT: s_add_i32 s85, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 37 -; SI-NEXT: s_and_b32 s4, s85, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s71, s22, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 33 -; SI-NEXT: s_and_b32 s8, s71, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s8, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 54 -; SI-NEXT: s_add_i32 s17, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 53 -; SI-NEXT: s_and_b32 s4, s17, 0xff +; SI-NEXT: s_or_b32 s29, s4, s5 +; SI-NEXT: v_readlane_b32 s4, v43, 5 +; SI-NEXT: v_readlane_b32 s5, v43, 4 +; SI-NEXT: s_and_b32 s4, s4, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: v_readlane_b32 s4, v62, 48 -; SI-NEXT: s_add_i32 s7, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 47 +; SI-NEXT: s_or_b32 s45, s4, s5 +; SI-NEXT: s_and_b32 
s4, s43, 0xff +; SI-NEXT: s_lshl_b32 s5, s42, 8 +; SI-NEXT: s_or_b32 s46, s4, s5 +; SI-NEXT: s_and_b32 s4, s56, 0xff +; SI-NEXT: s_lshl_b32 s5, s47, 8 +; SI-NEXT: s_or_b32 s47, s4, s5 +; SI-NEXT: s_and_b32 s4, s58, 0xff +; SI-NEXT: s_lshl_b32 s5, s57, 8 +; SI-NEXT: s_or_b32 s56, s4, s5 +; SI-NEXT: s_and_b32 s4, s60, 0xff +; SI-NEXT: s_lshl_b32 s5, s59, 8 +; SI-NEXT: s_or_b32 s57, s4, s5 +; SI-NEXT: s_and_b32 s4, s62, 0xff +; SI-NEXT: s_lshl_b32 s5, s61, 8 +; SI-NEXT: s_or_b32 s58, s4, s5 +; SI-NEXT: s_and_b32 s4, s74, 0xff +; SI-NEXT: s_lshl_b32 s5, s63, 8 +; SI-NEXT: s_or_b32 s59, s4, s5 +; SI-NEXT: s_and_b32 s4, s73, 0xff +; SI-NEXT: s_lshl_b32 s5, s72, 8 +; SI-NEXT: s_or_b32 s60, s4, s5 +; SI-NEXT: s_and_b32 s4, s24, 0xff +; SI-NEXT: s_lshl_b32 s5, s75, 8 +; SI-NEXT: s_or_b32 s61, s4, s5 +; SI-NEXT: s_and_b32 s4, s28, 0xff +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s62, s4, s5 +; SI-NEXT: s_and_b32 s4, s36, 0xff +; SI-NEXT: s_lshl_b32 s5, s16, 8 +; SI-NEXT: s_or_b32 s63, s4, s5 +; SI-NEXT: s_and_b32 s4, s10, 0xff +; SI-NEXT: s_lshl_b32 s5, s9, 8 +; SI-NEXT: s_or_b32 s72, s4, s5 ; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s35, s99, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s35, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s89, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s10, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 46 -; SI-NEXT: s_add_i32 s99, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 44 -; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v5 -; SI-NEXT: s_and_b32 s4, s99, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v29 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: 
v_or_b32_e32 v5, v19, v5 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: v_readlane_b32 s4, v62, 36 -; SI-NEXT: s_add_i32 s81, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 35 -; SI-NEXT: v_readlane_b32 s6, v62, 28 -; SI-NEXT: s_and_b32 s4, s81, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s55, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 26 -; SI-NEXT: s_and_b32 s6, s55, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s12, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 34 -; SI-NEXT: s_add_i32 s69, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 29 -; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v5 -; SI-NEXT: s_and_b32 s4, s69, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v21 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v5, v18, v5 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v13 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s4, v62, 22 -; SI-NEXT: s_add_i32 s34, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 21 -; SI-NEXT: v_readlane_b32 s6, v62, 19 -; SI-NEXT: s_and_b32 s4, s34, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s92, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s92, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s97, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s13, s4, 
0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 25 -; SI-NEXT: s_add_i32 s51, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 24 -; SI-NEXT: s_and_b32 s4, s51, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v5, v14, v5 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: v_readlane_b32 s4, v62, 20 -; SI-NEXT: s_add_i32 s95, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 18 -; SI-NEXT: s_and_b32 s4, s95, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s6, s96, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s80, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s14, s4, 0x3000000 -; SI-NEXT: s_add_i32 s4, s93, 3 +; SI-NEXT: s_lshl_b32 s5, s78, 8 +; SI-NEXT: s_or_b32 s73, s4, s5 +; SI-NEXT: s_and_b32 s4, s8, 0xff +; SI-NEXT: s_lshl_b32 s5, s91, 8 +; SI-NEXT: s_or_b32 s74, s4, s5 +; SI-NEXT: s_and_b32 s4, s6, 0xff +; SI-NEXT: s_lshl_b32 s5, s90, 8 +; SI-NEXT: s_or_b32 s75, s4, s5 +; SI-NEXT: v_readlane_b32 s4, v43, 9 +; SI-NEXT: v_readlane_b32 s5, v43, 8 ; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s84, 8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 8 -; SI-NEXT: s_add_i32 s6, s64, 3 -; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: v_add_i32_e32 v49, vcc, 0x3000000, v1 -; SI-NEXT: v_mov_b32_e32 v30, s16 -; SI-NEXT: v_mov_b32_e32 v39, s9 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x3000000, v2 -; SI-NEXT: v_mov_b32_e32 v28, s11 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v4 -; 
SI-NEXT: v_mov_b32_e32 v27, s48 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v7 -; SI-NEXT: v_mov_b32_e32 v26, s49 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v8 -; SI-NEXT: v_mov_b32_e32 v25, s94 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v9 -; SI-NEXT: v_mov_b32_e32 v24, s53 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v10 -; SI-NEXT: v_mov_b32_e32 v23, s36 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_or_b32_e32 v6, v6, v13 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_add_i32 s4, s83, 3 +; SI-NEXT: s_lshl_b32 s5, s5, 8 +; SI-NEXT: s_or_b32 s5, s4, s5 +; SI-NEXT: v_readlane_b32 s4, v43, 7 ; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 15 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s15, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 2 -; SI-NEXT: s_add_i32 s4, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 1 +; SI-NEXT: v_readlane_b32 s6, v43, 6 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 24 +; SI-NEXT: s_or_b32 s7, s6, s4 +; SI-NEXT: v_readlane_b32 s4, v43, 11 ; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: s_add_i32 s6, s26, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readlane_b32 s6, v43, 10 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: v_writelane_b32 v42, s7, 51 +; SI-NEXT: s_or_b32 s4, s6, s4 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: v_readlane_b32 s6, v43, 1 +; SI-NEXT: v_readlane_b32 s7, v43, 0 ; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s27, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 8 +; SI-NEXT: s_or_b32 s7, s6, s7 +; SI-NEXT: s_and_b32 s6, s11, 0xff ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; 
SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s17, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 0 -; SI-NEXT: s_add_i32 s4, s4, 3 -; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: s_add_i32 s6, s76, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 16 +; SI-NEXT: s_lshl_b32 s8, s12, 24 +; SI-NEXT: s_or_b32 s37, s8, s6 +; SI-NEXT: v_readlane_b32 s6, v43, 3 ; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s5, 24 +; SI-NEXT: v_readlane_b32 s8, v43, 2 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s40, s4, 0x3000000 -; SI-NEXT: v_readlane_b32 s4, v62, 7 +; SI-NEXT: s_lshl_b32 s8, s8, 24 +; SI-NEXT: s_or_b32 s6, s8, s6 +; SI-NEXT: s_and_b32 s8, s18, 0xff +; SI-NEXT: s_lshl_b32 s9, s15, 8 +; SI-NEXT: s_or_b32 s9, s8, s9 +; SI-NEXT: s_and_b32 s8, s13, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s10, s14, 24 +; SI-NEXT: s_or_b32 s68, s10, s8 +; SI-NEXT: s_and_b32 s8, s21, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s10, s22, 24 +; SI-NEXT: s_or_b32 s8, s10, s8 +; SI-NEXT: s_and_b32 s10, s77, 0xff +; SI-NEXT: s_lshl_b32 s11, s76, 8 +; SI-NEXT: s_or_b32 s11, s10, s11 +; SI-NEXT: s_and_b32 s10, s40, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s12, s41, 24 +; SI-NEXT: s_or_b32 s99, s12, s10 +; SI-NEXT: s_and_b32 s10, vcc_lo, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s12, vcc_hi, 24 +; SI-NEXT: s_or_b32 s10, s12, s10 +; SI-NEXT: s_and_b32 s12, s49, 0xff +; SI-NEXT: s_lshl_b32 s13, s48, 8 +; SI-NEXT: s_or_b32 s13, s12, s13 +; SI-NEXT: s_and_b32 s12, s38, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s14, s39, 24 +; SI-NEXT: s_or_b32 s92, s14, s12 +; SI-NEXT: s_and_b32 s12, s50, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 
s14, s51, 24 +; SI-NEXT: s_or_b32 s12, s14, s12 +; SI-NEXT: s_and_b32 s14, s55, 0xff +; SI-NEXT: s_lshl_b32 s15, s54, 8 +; SI-NEXT: s_or_b32 s15, s14, s15 +; SI-NEXT: s_and_b32 s14, s52, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s24, s53, 24 +; SI-NEXT: s_mov_b32 s28, s90 +; SI-NEXT: s_or_b32 s90, s24, s14 +; SI-NEXT: s_and_b32 s14, s64, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s25, s65, 24 +; SI-NEXT: s_or_b32 s14, s25, s14 +; SI-NEXT: s_and_b32 s25, s34, 0xff +; SI-NEXT: s_lshl_b32 s40, s86, 8 +; SI-NEXT: s_or_b32 s41, s25, s40 +; SI-NEXT: s_and_b32 s25, s80, 0xff +; SI-NEXT: s_lshl_b32 s25, s25, 16 +; SI-NEXT: s_lshl_b32 s40, s81, 24 +; SI-NEXT: s_or_b32 s18, s40, s25 +; SI-NEXT: s_and_b32 s40, s31, 0xff +; SI-NEXT: s_lshl_b32 s40, s40, 16 +; SI-NEXT: s_lshl_b32 s42, s84, 24 +; SI-NEXT: s_or_b32 s40, s42, s40 +; SI-NEXT: s_and_b32 s42, s35, 0xff +; SI-NEXT: s_lshl_b32 s43, s97, 8 +; SI-NEXT: s_or_b32 s43, s42, s43 +; SI-NEXT: s_and_b32 s42, s71, 0xff +; SI-NEXT: s_lshl_b32 s42, s42, 16 +; SI-NEXT: s_lshl_b32 s76, s67, 24 +; SI-NEXT: s_or_b32 s35, s76, s42 +; SI-NEXT: s_and_b32 s42, s87, 0xff +; SI-NEXT: s_lshl_b32 s42, s42, 16 +; SI-NEXT: s_lshl_b32 s76, s83, 24 +; SI-NEXT: s_or_b32 s42, s76, s42 +; SI-NEXT: s_and_b32 s76, s19, 0xff +; SI-NEXT: s_lshl_b32 s77, s20, 8 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s66, 0xff +; SI-NEXT: v_writelane_b32 v42, s78, 52 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s85, 24 +; SI-NEXT: s_or_b32 s19, s78, s77 +; SI-NEXT: s_and_b32 s77, s94, 0xff +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s27, 24 +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: s_or_b32 vcc_lo, s78, s77 +; SI-NEXT: s_or_b32 vcc_hi, s76, s19 +; SI-NEXT: s_and_b32 s76, s26, 0xff +; SI-NEXT: s_lshl_b32 s77, s23, 8 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s98, 0xff +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s17, 24 +; 
SI-NEXT: s_or_b32 s71, s78, s77 +; SI-NEXT: s_and_b32 s77, s79, 0xff +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 40 +; SI-NEXT: s_and_b32 s41, s41, 0xffff +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s88, 24 +; SI-NEXT: s_or_b32 s39, s76, s71 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 39 +; SI-NEXT: s_or_b32 s41, s41, s18 +; SI-NEXT: s_mov_b32 s31, s18 +; SI-NEXT: s_or_b32 s38, s78, s77 +; SI-NEXT: s_lshl_b32 s77, s17, 8 +; SI-NEXT: v_readlane_b32 s18, v43, 38 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 37 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s18, 24 +; SI-NEXT: s_or_b32 s80, s78, s77 +; SI-NEXT: s_and_b32 s77, s95, 0xff +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 36 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s93, 24 +; SI-NEXT: s_or_b32 s49, s76, s80 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 35 +; SI-NEXT: s_or_b32 s48, s78, s77 +; SI-NEXT: s_lshl_b32 s77, s17, 8 +; SI-NEXT: v_readlane_b32 s17, v43, 34 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 33 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s17, 24 +; SI-NEXT: s_or_b32 s81, s78, s77 +; SI-NEXT: s_and_b32 s77, s30, 0xff +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s69, 24 +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 31 +; SI-NEXT: s_or_b32 s50, s78, s77 +; SI-NEXT: s_or_b32 s51, s76, s81 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: s_lshl_b32 s77, s96, 8 +; SI-NEXT: v_readlane_b32 s17, v43, 30 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s17, 0xff +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s82, 24 +; SI-NEXT: v_writelane_b32 v42, s96, 53 +; SI-NEXT: v_readlane_b32 s18, v43, 32 +; 
SI-NEXT: v_writelane_b32 v42, s82, 54 +; SI-NEXT: s_or_b32 s82, s78, s77 +; SI-NEXT: s_and_b32 s77, s18, 0xff +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 28 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s70, 24 +; SI-NEXT: s_or_b32 s53, s76, s82 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 27 +; SI-NEXT: s_or_b32 s52, s78, s77 +; SI-NEXT: s_lshl_b32 s77, s17, 8 +; SI-NEXT: v_readlane_b32 s18, v43, 26 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s18, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 25 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s17, 24 +; SI-NEXT: v_writelane_b32 v42, s16, 55 +; SI-NEXT: s_or_b32 s16, s78, s77 +; SI-NEXT: s_and_b32 s77, s89, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 29 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s18, 24 +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 22 +; SI-NEXT: v_readlane_b32 s18, v43, 21 +; SI-NEXT: s_or_b32 s54, s78, s77 +; SI-NEXT: s_or_b32 s55, s76, s16 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: s_lshl_b32 s77, s18, 8 +; SI-NEXT: v_readlane_b32 s17, v43, 20 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 19 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s17, 24 +; SI-NEXT: v_readlane_b32 s17, v43, 24 +; SI-NEXT: s_or_b32 s83, s78, s77 +; SI-NEXT: s_and_b32 s77, s17, 0xff +; SI-NEXT: v_readlane_b32 s17, v43, 23 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s17, 24 +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: v_readlane_b32 s17, v43, 17 +; SI-NEXT: v_readlane_b32 s18, v43, 16 +; SI-NEXT: s_or_b32 s64, s78, s77 +; SI-NEXT: s_or_b32 s65, s76, s83 +; SI-NEXT: s_and_b32 s76, s17, 0xff +; SI-NEXT: s_lshl_b32 s77, s18, 8 +; SI-NEXT: v_readlane_b32 s18, v43, 15 +; SI-NEXT: s_or_b32 s76, s76, s77 +; SI-NEXT: s_and_b32 s77, s18, 0xff +; SI-NEXT: 
v_readlane_b32 s18, v43, 14 +; SI-NEXT: v_writelane_b32 v42, s89, 56 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s18, 24 +; SI-NEXT: v_writelane_b32 v42, s70, 57 +; SI-NEXT: s_or_b32 s85, s78, s77 +; SI-NEXT: s_and_b32 s77, s44, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 18 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_and_b32 s9, s9, 0xffff +; SI-NEXT: s_and_b32 s11, s11, 0xffff +; SI-NEXT: s_and_b32 s13, s13, 0xffff +; SI-NEXT: s_and_b32 s15, s15, 0xffff +; SI-NEXT: s_and_b32 s43, s43, 0xffff +; SI-NEXT: v_writelane_b32 v42, s69, 58 +; SI-NEXT: s_lshl_b32 s77, s77, 16 +; SI-NEXT: s_lshl_b32 s78, s18, 24 +; SI-NEXT: s_and_b32 s76, s76, 0xffff +; SI-NEXT: s_and_b32 s44, s29, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s37 +; SI-NEXT: s_or_b32 s9, s9, s68 +; SI-NEXT: s_or_b32 s11, s11, s99 +; SI-NEXT: s_or_b32 s13, s13, s92 +; SI-NEXT: s_or_b32 s15, s15, s90 +; SI-NEXT: s_or_b32 s43, s43, s35 +; SI-NEXT: v_writelane_b32 v42, s30, 59 +; SI-NEXT: s_mov_b32 s23, s91 +; SI-NEXT: s_mov_b32 s91, s36 +; SI-NEXT: s_or_b32 s66, s78, s77 +; SI-NEXT: s_or_b32 s67, s76, s85 +; SI-NEXT: s_and_b32 s45, s45, 0xffff +; SI-NEXT: s_and_b32 s46, s46, 0xffff +; SI-NEXT: s_and_b32 s47, s47, 0xffff +; SI-NEXT: s_and_b32 s56, s56, 0xffff +; SI-NEXT: s_and_b32 s57, s57, 0xffff +; SI-NEXT: s_and_b32 s30, s58, 0xffff +; SI-NEXT: s_and_b32 s34, s59, 0xffff +; SI-NEXT: s_and_b32 s36, s60, 0xffff +; SI-NEXT: s_and_b32 s97, s61, 0xffff +; SI-NEXT: s_and_b32 s86, s62, 0xffff +; SI-NEXT: s_and_b32 s98, s63, 0xffff +; SI-NEXT: s_and_b32 s17, s72, 0xffff +; SI-NEXT: s_and_b32 s87, s73, 0xffff +; SI-NEXT: s_and_b32 s96, s74, 0xffff +; SI-NEXT: s_and_b32 s22, s75, 0xffff +; SI-NEXT: s_or_b32 s74, s44, s4 +; SI-NEXT: s_mov_b32 s75, s5 +; SI-NEXT: s_lshr_b64 s[76:77], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 16 +; SI-NEXT: s_mov_b32 s70, s93 +; SI-NEXT: s_mov_b32 s69, s95 +; SI-NEXT: s_mov_b32 s93, s28 +; SI-NEXT: s_or_b32 s72, s45, s6 +; SI-NEXT: s_mov_b32 s73, s7 +; SI-NEXT: 
s_lshr_b64 s[26:27], s[6:7], 16 +; SI-NEXT: s_or_b32 s62, s46, s8 +; SI-NEXT: s_mov_b32 s63, s9 +; SI-NEXT: s_lshr_b64 s[28:29], s[8:9], 16 +; SI-NEXT: s_or_b32 s60, s47, s10 +; SI-NEXT: s_mov_b32 s61, s11 +; SI-NEXT: s_lshr_b64 s[88:89], s[10:11], 16 +; SI-NEXT: s_or_b32 s58, s56, s12 +; SI-NEXT: s_mov_b32 s59, s13 +; SI-NEXT: s_lshr_b64 s[20:21], s[12:13], 16 +; SI-NEXT: s_or_b32 s56, s57, s14 +; SI-NEXT: s_mov_b32 s57, s15 +; SI-NEXT: s_lshr_b64 s[24:25], s[14:15], 16 +; SI-NEXT: s_or_b32 s46, s30, s40 +; SI-NEXT: s_mov_b32 s47, s41 +; SI-NEXT: s_or_b32 s44, s34, s42 +; SI-NEXT: s_mov_b32 s34, s4 +; SI-NEXT: s_mov_b32 s45, s43 +; SI-NEXT: s_lshr_b64 s[94:95], s[42:43], 16 +; SI-NEXT: s_or_b32 s42, s36, vcc_lo +; SI-NEXT: s_mov_b32 s43, vcc_hi +; SI-NEXT: s_lshr_b64 vcc, vcc, 16 +; SI-NEXT: s_or_b32 s40, s97, s38 +; SI-NEXT: s_mov_b32 s41, s39 +; SI-NEXT: s_lshr_b64 s[38:39], s[38:39], 16 +; SI-NEXT: s_or_b32 s14, s86, s48 +; SI-NEXT: s_mov_b32 s15, s49 +; SI-NEXT: s_lshr_b64 s[48:49], s[48:49], 16 +; SI-NEXT: s_or_b32 s12, s98, s50 +; SI-NEXT: s_mov_b32 s13, s51 +; SI-NEXT: s_lshr_b64 s[50:51], s[50:51], 16 +; SI-NEXT: s_or_b32 s10, s17, s52 +; SI-NEXT: s_mov_b32 s11, s53 +; SI-NEXT: s_lshr_b64 s[52:53], s[52:53], 16 +; SI-NEXT: s_or_b32 s8, s87, s54 +; SI-NEXT: s_mov_b32 s9, s55 +; SI-NEXT: s_lshr_b64 s[54:55], s[54:55], 16 +; SI-NEXT: s_or_b32 s6, s96, s64 +; SI-NEXT: s_mov_b32 s7, s65 +; SI-NEXT: s_lshr_b64 s[64:65], s[64:65], 16 +; SI-NEXT: s_or_b32 s4, s22, s66 +; SI-NEXT: s_mov_b32 s5, s67 +; SI-NEXT: s_lshr_b64 s[66:67], s[66:67], 16 +; SI-NEXT: v_readlane_b32 s17, v42, 51 +; SI-NEXT: s_lshr_b32 s55, s17, 16 +; SI-NEXT: s_lshr_b32 s53, s37, 16 +; SI-NEXT: s_lshr_b32 s51, s68, 16 +; SI-NEXT: s_lshr_b32 s49, s99, 16 +; SI-NEXT: s_lshr_b32 s86, s92, 16 +; SI-NEXT: s_lshr_b32 s39, s90, 16 +; SI-NEXT: s_lshr_b32 s18, s31, 16 +; SI-NEXT: s_lshr_b32 s22, s35, 16 +; SI-NEXT: s_lshr_b32 s97, s19, 16 +; SI-NEXT: s_lshr_b32 s65, s71, 16 +; SI-NEXT: s_lshr_b32 s19, 
s80, 16 +; SI-NEXT: s_lshr_b32 s71, s81, 16 +; SI-NEXT: s_lshr_b32 s67, s82, 16 +; SI-NEXT: v_readlane_b32 s82, v42, 54 +; SI-NEXT: v_readlane_b32 s96, v42, 53 +; SI-NEXT: s_lshr_b32 s80, s16, 16 +; SI-NEXT: v_readlane_b32 s16, v42, 55 +; SI-NEXT: s_lshr_b32 s81, s83, 16 +; SI-NEXT: s_mov_b32 s90, s93 +; SI-NEXT: v_readlane_b32 s78, v42, 52 +; SI-NEXT: s_mov_b32 s95, s69 +; SI-NEXT: s_mov_b32 s93, s70 +; SI-NEXT: v_readlane_b32 s30, v42, 59 +; SI-NEXT: v_readlane_b32 s69, v42, 58 +; SI-NEXT: v_readlane_b32 s70, v42, 57 +; SI-NEXT: v_readlane_b32 s89, v42, 56 +; SI-NEXT: s_lshr_b32 s77, s85, 16 +; SI-NEXT: s_mov_b32 s84, vcc_lo +; SI-NEXT: s_mov_b32 s36, s91 +; SI-NEXT: s_mov_b32 s91, s23 +; SI-NEXT: s_cbranch_execnz .LBB97_3 +; SI-NEXT: .LBB97_2: ; %cmp.true +; SI-NEXT: v_readlane_b32 s4, v43, 42 ; SI-NEXT: s_add_i32 s4, s4, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 17 -; SI-NEXT: v_readlane_b32 s6, v62, 6 +; SI-NEXT: v_readlane_b32 s6, v43, 41 ; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 +; SI-NEXT: s_lshl_b32 s5, s90, 8 ; SI-NEXT: s_add_i32 s6, s6, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readlane_b32 s5, v43, 18 ; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s19, 24 +; SI-NEXT: s_lshl_b32 s5, s5, 24 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s18, s4, 0x3000000 -; SI-NEXT: s_add_i32 s4, s20, 3 -; SI-NEXT: v_readlane_b32 s5, v62, 5 -; SI-NEXT: v_readlane_b32 s6, v62, 4 -; SI-NEXT: s_and_b32 s4, s4, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 +; SI-NEXT: v_readlane_b32 s5, v43, 17 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: v_readlane_b32 s6, v43, 16 +; SI-NEXT: v_readlane_b32 s7, v43, 15 +; SI-NEXT: s_and_b32 s5, s5, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 8 +; SI-NEXT: s_add_i32 s7, s7, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; 
SI-NEXT: v_readlane_b32 s6, v43, 14 +; SI-NEXT: s_and_b32 s7, s7, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_readlane_b32 s6, v43, 44 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 3 +; SI-NEXT: v_readlane_b32 s8, v43, 24 ; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s5, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s42, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v22, s8 -; SI-NEXT: v_mov_b32_e32 v20, s10 -; SI-NEXT: v_mov_b32_e32 v19, s12 -; SI-NEXT: v_mov_b32_e32 v18, s13 -; SI-NEXT: v_add_i32_e32 v45, vcc, 0x3000000, v5 -; SI-NEXT: v_mov_b32_e32 v5, s14 -; SI-NEXT: v_add_i32_e32 v47, vcc, 0x3000000, v6 -; SI-NEXT: v_mov_b32_e32 v6, s15 -; SI-NEXT: v_alignbit_b32 v57, s42, v13, 16 -; SI-NEXT: v_mov_b32_e32 v13, s17 -; SI-NEXT: v_alignbit_b32 v58, s40, v13, 16 -; SI-NEXT: v_alignbit_b32 v56, v6, v47, 16 -; SI-NEXT: v_alignbit_b32 v51, v5, v45, 16 -; SI-NEXT: v_alignbit_b32 v1, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v3, v19, v16, 16 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_alignbit_b32 v44, v20, v15, 16 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v43, v22, v12, 16 -; SI-NEXT: v_alignbit_b32 v42, v23, v10, 16 -; SI-NEXT: v_alignbit_b32 v41, v24, v9, 16 -; SI-NEXT: v_alignbit_b32 v40, v25, v8, 16 -; SI-NEXT: v_alignbit_b32 v55, v26, v7, 16 -; SI-NEXT: v_alignbit_b32 v54, v27, v4, 16 -; SI-NEXT: v_alignbit_b32 v53, v28, v2, 16 -; SI-NEXT: v_alignbit_b32 v52, v39, v31, 16 -; SI-NEXT: v_alignbit_b32 v28, v30, v49, 16 -; SI-NEXT: s_lshr_b32 s73, s42, 16 -; SI-NEXT: s_lshr_b32 s72, s40, 16 -; SI-NEXT: s_lshr_b32 s63, s15, 16 -; 
SI-NEXT: s_lshr_b32 s62, s14, 16 -; SI-NEXT: s_lshr_b32 s61, s13, 16 -; SI-NEXT: s_lshr_b32 s60, s12, 16 -; SI-NEXT: s_lshr_b32 s59, s10, 16 -; SI-NEXT: s_lshr_b32 s58, s8, 16 -; SI-NEXT: s_lshr_b32 s57, s36, 16 -; SI-NEXT: s_lshr_b32 s56, s53, 16 -; SI-NEXT: s_lshr_b32 s47, s94, 16 -; SI-NEXT: s_lshr_b32 s46, s49, 16 -; SI-NEXT: s_lshr_b32 s45, s48, 16 -; SI-NEXT: s_lshr_b32 s44, s11, 16 -; SI-NEXT: s_lshr_b32 s43, s9, 16 -; SI-NEXT: s_lshr_b32 s41, s16, 16 +; SI-NEXT: s_lshl_b32 s7, s91, 8 +; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_readlane_b32 s7, v43, 23 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 24 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_or_b32 s7, s7, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_readlane_b32 s7, v43, 22 +; SI-NEXT: s_add_i32 s7, s7, 3 +; SI-NEXT: v_readlane_b32 s8, v43, 21 +; SI-NEXT: v_readlane_b32 s9, v43, 20 +; SI-NEXT: s_and_b32 s7, s7, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 8 +; SI-NEXT: s_add_i32 s9, s9, 3 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_readlane_b32 s8, v43, 19 +; SI-NEXT: s_and_b32 s9, s9, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_addk_i32 s7, 0x300 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_readlane_b32 s8, v43, 43 +; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s9, s78, 8 +; SI-NEXT: s_add_i32 s10, s89, 3 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_readlane_b32 s9, v43, 29 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 24 +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_addk_i32 s8, 0x300 +; SI-NEXT: s_or_b32 s9, s9, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_readlane_b32 s9, v43, 28 +; SI-NEXT: s_add_i32 s9, s9, 3 +; SI-NEXT: v_readlane_b32 s10, v43, 27 +; 
SI-NEXT: v_readlane_b32 s11, v43, 26 +; SI-NEXT: s_and_b32 s9, s9, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 8 +; SI-NEXT: s_add_i32 s11, s11, 3 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_readlane_b32 s10, v43, 25 +; SI-NEXT: s_and_b32 s11, s11, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 24 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_addk_i32 s9, 0x300 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s9, s9, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_readlane_b32 s10, v43, 46 +; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: v_readlane_b32 s11, v43, 45 +; SI-NEXT: v_readlane_b32 s12, v43, 32 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 8 +; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s11, s70, 24 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_addk_i32 s10, 0x300 +; SI-NEXT: s_or_b32 s11, s11, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_readlane_b32 s11, v43, 31 +; SI-NEXT: s_add_i32 s11, s11, 3 +; SI-NEXT: v_readlane_b32 s13, v43, 30 +; SI-NEXT: s_and_b32 s11, s11, 0xff +; SI-NEXT: s_lshl_b32 s12, s96, 8 +; SI-NEXT: s_add_i32 s13, s13, 3 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: s_and_b32 s13, s13, 0xff +; SI-NEXT: s_lshl_b32 s12, s82, 24 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_addk_i32 s11, 0x300 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: s_and_b32 s11, s11, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: s_add_i32 s12, s36, 3 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s13, s16, 8 +; SI-NEXT: s_add_i32 s14, s30, 3 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: s_lshl_b32 s13, s69, 24 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_addk_i32 s12, 0x300 +; SI-NEXT: s_or_b32 s13, s13, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_readlane_b32 s13, v43, 36 +; SI-NEXT: 
s_add_i32 s13, s13, 3 +; SI-NEXT: v_readlane_b32 s14, v43, 35 +; SI-NEXT: v_readlane_b32 s15, v43, 34 +; SI-NEXT: s_and_b32 s13, s13, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 8 +; SI-NEXT: s_add_i32 s15, s15, 3 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_readlane_b32 s14, v43, 33 +; SI-NEXT: s_and_b32 s15, s15, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_addk_i32 s13, 0x300 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: s_and_b32 s13, s13, 0xffff +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_readlane_b32 s14, v42, 50 +; SI-NEXT: s_add_i32 s17, s14, 3 +; SI-NEXT: v_readlane_b32 s15, v42, 49 +; SI-NEXT: s_and_b32 s14, s17, 0xff +; SI-NEXT: s_lshl_b32 s15, s15, 8 +; SI-NEXT: s_add_i32 s16, s95, 3 +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s15, s93, 24 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_addk_i32 s14, 0x300 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_readlane_b32 s15, v43, 40 +; SI-NEXT: s_add_i32 s15, s15, 3 +; SI-NEXT: v_readlane_b32 s16, v43, 39 +; SI-NEXT: v_readlane_b32 s17, v43, 38 +; SI-NEXT: s_and_b32 s15, s15, 0xff +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_readlane_b32 s16, v43, 37 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: s_lshl_b32 s16, s16, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_addk_i32 s15, 0x300 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s15, s15, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_readlane_b32 s16, v42, 48 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 47 +; SI-NEXT: v_readlane_b32 s18, v42, 42 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s99, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 43 +; SI-NEXT: s_and_b32 s18, s99, 
0xff +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 38 +; SI-NEXT: s_add_i32 s87, s17, 3 +; SI-NEXT: v_readlane_b32 s18, v42, 35 +; SI-NEXT: v_readlane_b32 s19, v42, 29 +; SI-NEXT: s_and_b32 s17, s87, 0xff +; SI-NEXT: s_lshl_b32 s18, s18, 8 +; SI-NEXT: s_add_i32 s23, s19, 3 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v42, 30 +; SI-NEXT: s_and_b32 s23, s23, 0xff +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_lshl_b32 s23, s23, 16 +; SI-NEXT: s_addk_i32 s17, 0x300 +; SI-NEXT: s_or_b32 s18, s18, s23 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_add_i32 s40, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 45 +; SI-NEXT: s_add_i32 s41, s17, 0x3000000 +; SI-NEXT: s_add_i32 s68, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 44 +; SI-NEXT: v_readlane_b32 s18, v42, 39 +; SI-NEXT: s_and_b32 s16, s68, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s96, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 40 +; SI-NEXT: s_and_b32 s18, s96, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 33 +; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: v_readlane_b32 s18, v42, 31 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: s_lshl_b32 s18, s18, 8 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v42, 24 +; SI-NEXT: s_addk_i32 s17, 0x300 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s42, s16, 0x3000000 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: v_readlane_b32 s17, v42, 25 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 
24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s43, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 46 +; SI-NEXT: s_add_i32 s23, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 41 +; SI-NEXT: v_readlane_b32 s18, v42, 37 +; SI-NEXT: s_and_b32 s16, s23, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s86, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 36 +; SI-NEXT: s_and_b32 s18, s86, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s44, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 28 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 22 +; SI-NEXT: v_readlane_b32 s18, v42, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 15 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s45, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 34 +; SI-NEXT: s_add_i32 s83, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 32 +; SI-NEXT: v_readlane_b32 s18, v42, 26 +; SI-NEXT: s_and_b32 s16, s83, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 27 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; 
SI-NEXT: s_add_i32 s46, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 23 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 21 +; SI-NEXT: v_readlane_b32 s18, v42, 17 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 18 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s47, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 20 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 19 +; SI-NEXT: v_readlane_b32 s18, v42, 13 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 14 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s56, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 12 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 11 +; SI-NEXT: v_readlane_b32 s18, v42, 7 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 8 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s57, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 10 +; SI-NEXT: s_add_i32 s16, s16, 3 
+; SI-NEXT: v_readlane_b32 s17, v42, 9 +; SI-NEXT: v_readlane_b32 s18, v42, 5 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 6 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s58, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 4 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 3 +; SI-NEXT: v_readlane_b32 s18, v43, 63 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v42, 0 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s59, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v42, 2 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v42, 1 +; SI-NEXT: v_readlane_b32 s18, v43, 61 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 62 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s60, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 60 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 59 +; SI-NEXT: v_readlane_b32 s18, v43, 55 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; 
SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 56 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s61, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 58 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 57 +; SI-NEXT: v_readlane_b32 s18, v43, 53 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 54 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s62, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 52 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 51 +; SI-NEXT: v_readlane_b32 s18, v43, 49 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 50 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s63, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 5 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 4 +; SI-NEXT: v_readlane_b32 s18, v43, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: 
v_readlane_b32 s17, v43, 2 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s72, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 1 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 0 +; SI-NEXT: v_readlane_b32 s18, v43, 47 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 48 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s73, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 13 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 12 +; SI-NEXT: v_readlane_b32 s18, v43, 11 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 10 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s74, s16, 0x3000000 +; SI-NEXT: v_readlane_b32 s16, v43, 9 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v43, 8 +; SI-NEXT: v_readlane_b32 s18, v43, 7 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readlane_b32 s17, v43, 6 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_addk_i32 s16, 0x300 +; SI-NEXT: 
s_lshl_b32 s17, s17, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_add_i32 s6, s6, 0x3000000 +; SI-NEXT: s_add_i32 s7, s7, 0x3000000 +; SI-NEXT: s_add_i32 s8, s8, 0x3000000 +; SI-NEXT: s_add_i32 s9, s9, 0x3000000 +; SI-NEXT: s_add_i32 s10, s10, 0x3000000 +; SI-NEXT: s_add_i32 s11, s11, 0x3000000 +; SI-NEXT: s_add_i32 s12, s12, 0x3000000 +; SI-NEXT: s_add_i32 s13, s13, 0x3000000 +; SI-NEXT: s_add_i32 s14, s14, 0x3000000 +; SI-NEXT: s_add_i32 s15, s15, 0x3000000 +; SI-NEXT: s_add_i32 s75, s16, 0x3000000 +; SI-NEXT: s_lshr_b64 s[76:77], s[74:75], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[48:49], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[50:51], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[52:53], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[54:55], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[64:65], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[66:67], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[72:73], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[62:63], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[60:61], 16 +; SI-NEXT: s_lshr_b64 s[20:21], s[58:59], 16 +; SI-NEXT: s_lshr_b64 s[24:25], s[56:57], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[46:47], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[84:85], s[42:43], 16 +; SI-NEXT: s_lshr_b32 s55, s75, 16 +; SI-NEXT: s_lshr_b32 s53, s73, 16 +; SI-NEXT: s_lshr_b32 s51, s63, 16 +; SI-NEXT: s_lshr_b32 s49, s61, 16 +; SI-NEXT: s_lshr_b32 s86, s59, 16 +; SI-NEXT: s_lshr_b32 s39, s57, 16 +; SI-NEXT: s_lshr_b32 s18, s47, 16 +; SI-NEXT: s_lshr_b32 s22, s45, 16 +; SI-NEXT: s_lshr_b32 s97, s43, 16 +; SI-NEXT: s_lshr_b32 s65, s41, 16 +; SI-NEXT: s_lshr_b32 s19, s15, 16 +; SI-NEXT: s_lshr_b32 s71, s13, 16 +; SI-NEXT: s_lshr_b32 s67, s11, 16 +; SI-NEXT: s_lshr_b32 s80, s9, 16 +; SI-NEXT: s_lshr_b32 s81, s7, 16 +; SI-NEXT: s_lshr_b32 s77, 
s5, 16 ; SI-NEXT: .LBB97_3: ; %end -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v57 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s16, s74, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s75, 0xffff +; SI-NEXT: s_lshl_b32 s17, s55, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s72, 0xffff +; SI-NEXT: s_lshl_b32 s17, s26, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v58 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s73, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v47 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s62, 0xffff +; SI-NEXT: s_lshl_b32 s17, s28, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; 
SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v56 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s63, 0xffff +; SI-NEXT: s_lshl_b32 s17, s51, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v45 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s60, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v51 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s61, 0xffff +; SI-NEXT: s_lshl_b32 s17, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s58, 0xffff +; SI-NEXT: s_lshl_b32 s17, s20, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; 
SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v1 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s59, 0xffff +; SI-NEXT: s_lshl_b32 s17, s86, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 36, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v16 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s56, 0xffff +; SI-NEXT: s_lshl_b32 s17, s24, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v3 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s57, 0xffff +; SI-NEXT: s_lshl_b32 s17, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 44, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v15 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s46, 0xffff +; SI-NEXT: s_lshl_b32 s17, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: 
s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v44 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s47, 0xffff +; SI-NEXT: s_lshl_b32 s17, s18, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v12 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s44, 0xffff +; SI-NEXT: s_lshl_b32 s17, s94, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v43 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s45, 0xffff +; SI-NEXT: s_lshl_b32 s17, s22, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 60, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s42, 0xffff +; SI-NEXT: s_lshl_b32 s17, s84, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 
s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v42 -; SI-NEXT: s_and_b32 s4, s36, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xffff +; SI-NEXT: s_lshl_b32 s17, s97, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x44, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v41 -; SI-NEXT: s_and_b32 s4, s53, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s65, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x4c, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: 
s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v40 -; SI-NEXT: s_and_b32 s4, s94, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s19, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x54, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v7 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v55 -; SI-NEXT: s_and_b32 s4, s49, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s71, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x5c, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v54 -; SI-NEXT: s_and_b32 s4, s48, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; 
SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s52, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x64, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v53 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s67, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s54, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s80, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 
0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s64, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s81, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s66, 16 ; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 -; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_or_b32 s4, s4, s6 ; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v49 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v28 -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s77, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; 
SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s99, v63, 35 -; SI-NEXT: v_readlane_b32 s98, v63, 34 -; SI-NEXT: v_readlane_b32 s97, v63, 33 -; SI-NEXT: v_readlane_b32 s96, v63, 32 -; SI-NEXT: v_readlane_b32 s87, v63, 31 -; SI-NEXT: v_readlane_b32 s86, v63, 30 -; SI-NEXT: v_readlane_b32 s85, v63, 29 -; SI-NEXT: v_readlane_b32 s84, v63, 28 -; SI-NEXT: v_readlane_b32 s83, v63, 27 -; SI-NEXT: v_readlane_b32 s82, v63, 26 -; SI-NEXT: v_readlane_b32 s81, v63, 25 -; SI-NEXT: v_readlane_b32 s80, v63, 24 -; SI-NEXT: v_readlane_b32 s71, v63, 23 -; SI-NEXT: v_readlane_b32 s70, v63, 22 -; SI-NEXT: v_readlane_b32 s69, v63, 21 -; SI-NEXT: v_readlane_b32 s68, v63, 20 -; SI-NEXT: v_readlane_b32 s67, v63, 19 -; SI-NEXT: v_readlane_b32 s66, v63, 18 -; SI-NEXT: v_readlane_b32 s65, v63, 17 -; SI-NEXT: v_readlane_b32 s64, v63, 16 -; SI-NEXT: v_readlane_b32 s55, v63, 15 -; SI-NEXT: v_readlane_b32 s54, v63, 14 -; SI-NEXT: v_readlane_b32 s53, v63, 13 -; SI-NEXT: v_readlane_b32 s52, v63, 12 -; SI-NEXT: v_readlane_b32 s51, v63, 11 -; SI-NEXT: v_readlane_b32 s50, v63, 10 -; SI-NEXT: v_readlane_b32 s49, v63, 9 -; SI-NEXT: v_readlane_b32 s48, v63, 8 -; SI-NEXT: 
v_readlane_b32 s39, v63, 7 -; SI-NEXT: v_readlane_b32 s38, v63, 6 -; SI-NEXT: v_readlane_b32 s37, v63, 5 -; SI-NEXT: v_readlane_b32 s36, v63, 4 -; SI-NEXT: v_readlane_b32 s35, v63, 3 -; SI-NEXT: v_readlane_b32 s34, v63, 2 -; SI-NEXT: v_readlane_b32 s31, v63, 1 -; SI-NEXT: v_readlane_b32 s30, v63, 0 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s99, v41, 35 +; SI-NEXT: v_readlane_b32 s98, v41, 34 +; SI-NEXT: v_readlane_b32 s97, v41, 33 +; SI-NEXT: v_readlane_b32 s96, v41, 32 +; SI-NEXT: v_readlane_b32 s87, v41, 31 +; SI-NEXT: v_readlane_b32 s86, v41, 30 +; SI-NEXT: v_readlane_b32 s85, v41, 29 +; SI-NEXT: v_readlane_b32 s84, v41, 28 +; SI-NEXT: v_readlane_b32 s83, v41, 27 +; SI-NEXT: v_readlane_b32 s82, v41, 26 +; SI-NEXT: v_readlane_b32 s81, v41, 25 +; SI-NEXT: v_readlane_b32 s80, v41, 24 +; SI-NEXT: v_readlane_b32 s71, v41, 23 +; SI-NEXT: v_readlane_b32 s70, v41, 22 +; SI-NEXT: v_readlane_b32 s69, v41, 21 +; SI-NEXT: v_readlane_b32 s68, v41, 20 +; SI-NEXT: v_readlane_b32 s67, v41, 19 +; SI-NEXT: v_readlane_b32 s66, v41, 18 +; SI-NEXT: v_readlane_b32 s65, v41, 17 +; SI-NEXT: v_readlane_b32 s64, v41, 16 +; SI-NEXT: v_readlane_b32 s55, v41, 15 +; SI-NEXT: v_readlane_b32 s54, v41, 14 +; SI-NEXT: v_readlane_b32 s53, v41, 13 +; SI-NEXT: v_readlane_b32 s52, v41, 12 +; SI-NEXT: v_readlane_b32 s51, v41, 11 +; SI-NEXT: v_readlane_b32 s50, v41, 10 +; SI-NEXT: v_readlane_b32 s49, v41, 9 +; SI-NEXT: v_readlane_b32 s48, v41, 8 +; SI-NEXT: v_readlane_b32 s39, v41, 7 +; SI-NEXT: v_readlane_b32 s38, v41, 6 +; SI-NEXT: v_readlane_b32 s37, v41, 5 +; SI-NEXT: v_readlane_b32 s36, v41, 4 +; SI-NEXT: v_readlane_b32 s35, v41, 3 +; SI-NEXT: v_readlane_b32 s34, v41, 2 +; SI-NEXT: v_readlane_b32 s31, v41, 1 +; SI-NEXT: v_readlane_b32 s30, v41, 0 ; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 
offset:416 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB97_4: -; SI-NEXT: ; implicit-def: $sgpr18 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $sgpr17 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr55 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr53 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr49 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $sgpr36 -; 
SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $sgpr86 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $sgpr94 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr39 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $sgpr48 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr18 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $sgpr16 -; SI-NEXT: ; implicit-def: $sgpr41 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr97 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr19 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: 
$sgpr6 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr77 ; SI-NEXT: s_branch .LBB97_2 ; ; VI-LABEL: bitcast_v128i8_to_v64i16_scalar: @@ -208926,1387 +210637,1672 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[4:5] -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:80 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 -; SI-NEXT: v_writelane_b32 v63, s30, 0 -; SI-NEXT: v_writelane_b32 v63, s31, 1 -; SI-NEXT: v_writelane_b32 v63, s34, 2 -; SI-NEXT: v_writelane_b32 v63, s35, 3 -; SI-NEXT: v_writelane_b32 v63, s36, 4 -; SI-NEXT: v_writelane_b32 v63, s37, 5 -; SI-NEXT: v_writelane_b32 v63, s38, 6 -; SI-NEXT: v_writelane_b32 v63, s39, 7 -; SI-NEXT: v_writelane_b32 v63, s48, 8 -; SI-NEXT: v_writelane_b32 v63, s49, 9 -; SI-NEXT: v_writelane_b32 v63, s50, 10 -; SI-NEXT: 
v_writelane_b32 v63, s51, 11 -; SI-NEXT: v_writelane_b32 v63, s52, 12 -; SI-NEXT: v_writelane_b32 v63, s53, 13 -; SI-NEXT: v_writelane_b32 v63, s54, 14 -; SI-NEXT: v_writelane_b32 v63, s55, 15 -; SI-NEXT: v_writelane_b32 v63, s64, 16 -; SI-NEXT: v_writelane_b32 v63, s65, 17 -; SI-NEXT: v_writelane_b32 v63, s66, 18 -; SI-NEXT: v_writelane_b32 v63, s67, 19 -; SI-NEXT: v_writelane_b32 v63, s68, 20 -; SI-NEXT: v_writelane_b32 v63, s69, 21 -; SI-NEXT: v_writelane_b32 v63, s70, 22 -; SI-NEXT: v_writelane_b32 v63, s71, 23 -; SI-NEXT: v_writelane_b32 v63, s80, 24 -; SI-NEXT: v_writelane_b32 v63, s81, 25 -; SI-NEXT: v_writelane_b32 v63, s82, 26 -; SI-NEXT: v_writelane_b32 v63, s83, 27 -; SI-NEXT: v_writelane_b32 v63, s84, 28 -; SI-NEXT: v_writelane_b32 v63, s85, 29 -; SI-NEXT: v_writelane_b32 v63, s86, 30 -; SI-NEXT: v_writelane_b32 v63, s87, 31 -; SI-NEXT: v_writelane_b32 v63, s96, 32 -; SI-NEXT: v_writelane_b32 v63, s97, 33 -; SI-NEXT: v_writelane_b32 v63, s98, 34 -; SI-NEXT: s_mov_b32 s6, s18 -; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane -; SI-NEXT: v_writelane_b32 v63, s99, 35 -; SI-NEXT: v_readfirstlane_b32 s62, v30 -; SI-NEXT: v_readfirstlane_b32 s63, v29 -; SI-NEXT: v_readfirstlane_b32 s59, v26 -; SI-NEXT: v_readfirstlane_b32 s60, v25 -; SI-NEXT: v_readfirstlane_b32 s98, v22 -; SI-NEXT: v_readfirstlane_b32 s61, v21 -; SI-NEXT: v_readfirstlane_b32 s99, v18 -; SI-NEXT: v_readfirstlane_b32 s58, v17 -; SI-NEXT: v_readfirstlane_b32 s96, v14 -; SI-NEXT: v_readfirstlane_b32 s97, v13 -; SI-NEXT: v_readfirstlane_b32 s86, v10 -; SI-NEXT: v_readfirstlane_b32 s87, v9 -; SI-NEXT: v_readfirstlane_b32 s84, v6 -; SI-NEXT: v_readfirstlane_b32 s85, v5 -; SI-NEXT: v_readfirstlane_b32 s81, v2 -; SI-NEXT: v_readfirstlane_b32 s82, v1 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_writelane_b32 
v62, s6, 0 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s88, v32 -; SI-NEXT: v_readfirstlane_b32 s18, v34 -; SI-NEXT: v_readfirstlane_b32 s78, v38 -; SI-NEXT: v_readfirstlane_b32 s79, v39 -; SI-NEXT: v_readfirstlane_b32 s76, v48 -; SI-NEXT: v_readfirstlane_b32 s77, v49 -; SI-NEXT: v_readfirstlane_b32 s74, v50 -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_readfirstlane_b32 s75, v51 -; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_readfirstlane_b32 s72, v52 -; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_readfirstlane_b32 s73, v53 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v54 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v24 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v40 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v41 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v42 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v43 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v55 -; SI-NEXT: s_cbranch_scc0 .LBB99_4 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s56, s4, s5 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s57, s4, s5 -; SI-NEXT: v_mov_b32_e32 v1, s56 -; SI-NEXT: v_alignbit_b32 v2, s57, v1, 24 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, s57, v1, 16 -; SI-NEXT: v_alignbit_b32 v1, s57, v1, 8 -; SI-NEXT: s_or_b32 s46, s4, s5 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_or_b32 s47, s4, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_mov_b32_e32 v1, s46 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, s47, v1, 24 -; SI-NEXT: s_or_b32 s44, s4, s5 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, s47, v1, 16 -; SI-NEXT: v_alignbit_b32 v51, s47, v1, 8 -; SI-NEXT: s_or_b32 s45, s4, s5 -; SI-NEXT: v_mov_b32_e32 v1, s44 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, s45, v1, 24 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, s45, v1, 16 -; SI-NEXT: v_alignbit_b32 v1, s45, v1, 8 -; SI-NEXT: s_or_b32 s42, s4, s5 -; SI-NEXT: s_and_b32 s4, s82, 0xffff -; SI-NEXT: s_lshl_b32 s5, s81, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_or_b32 s43, s4, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v1, s42 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, s43, v1, 24 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, s43, v1, 16 -; SI-NEXT: v_alignbit_b32 v1, s43, v1, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: s_and_b32 s4, s85, 0xffff -; SI-NEXT: s_lshl_b32 s5, s84, 16 -; SI-NEXT: v_or_b32_e32 v16, v1, v25 -; SI-NEXT: 
s_or_b32 s41, s4, s5 -; SI-NEXT: v_alignbit_b32 v1, s41, v16, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s41, v16, 16 -; SI-NEXT: s_and_b32 s4, s87, 0xffff -; SI-NEXT: s_lshl_b32 s5, s86, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s41, v16, 8 -; SI-NEXT: s_or_b32 s40, s4, s5 -; SI-NEXT: s_and_b32 s4, s97, 0xffff -; SI-NEXT: s_lshl_b32 s5, s96, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 -; SI-NEXT: s_or_b32 s15, s4, s5 -; SI-NEXT: s_and_b32 s4, s58, 0xffff -; SI-NEXT: s_lshl_b32 s5, s99, 16 -; SI-NEXT: v_or_b32_e32 v14, v1, v26 -; SI-NEXT: s_or_b32 s14, s4, s5 -; SI-NEXT: s_and_b32 s4, s61, 0xffff -; SI-NEXT: s_lshl_b32 s5, s98, 16 -; SI-NEXT: v_alignbit_b32 v1, s40, v14, 24 -; SI-NEXT: s_or_b32 s13, s4, s5 -; SI-NEXT: s_and_b32 s4, s60, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s40, v14, 16 -; SI-NEXT: s_or_b32 s12, s4, s5 -; SI-NEXT: s_and_b32 s4, s63, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s40, v14, 8 -; SI-NEXT: s_or_b32 s11, s4, s5 -; SI-NEXT: s_and_b32 s4, s73, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: s_or_b32 s10, s4, s5 -; SI-NEXT: s_and_b32 s4, s75, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: v_or_b32_e32 v12, v1, v29 -; SI-NEXT: s_or_b32 s9, s4, s5 -; 
SI-NEXT: s_and_b32 s4, s77, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: v_alignbit_b32 v1, s15, v12, 24 -; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: s_and_b32 s4, s79, 0xffff -; SI-NEXT: s_lshl_b32 s5, s78, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s15, v12, 16 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s88, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s15, v12, 8 -; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: s_lshr_b32 s4, s11, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 -; SI-NEXT: v_writelane_b32 v62, s4, 1 -; SI-NEXT: s_lshr_b32 s4, s10, 8 -; SI-NEXT: v_or_b32_e32 v10, v1, v30 -; SI-NEXT: v_writelane_b32 v62, s4, 3 -; SI-NEXT: s_lshr_b32 s4, s9, 8 -; SI-NEXT: v_alignbit_b32 v1, s14, v10, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 6 -; SI-NEXT: s_lshr_b32 s4, s8, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s14, v10, 16 -; SI-NEXT: v_writelane_b32 v62, s4, 9 -; SI-NEXT: s_lshr_b32 s4, s7, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, s14, v10, 8 -; SI-NEXT: v_writelane_b32 v62, s4, 12 -; SI-NEXT: s_lshr_b32 s4, s6, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v19 -; SI-NEXT: v_writelane_b32 v62, s4, 15 -; SI-NEXT: s_and_b32 s4, s72, 0xffff -; SI-NEXT: v_or_b32_e32 v8, v1, v32 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v23 -; SI-NEXT: v_writelane_b32 v62, s4, 2 -; 
SI-NEXT: s_and_b32 s4, s74, 0xffff -; SI-NEXT: v_or_b32_e32 v5, v1, v34 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v27 -; SI-NEXT: v_writelane_b32 v62, s4, 5 -; SI-NEXT: s_and_b32 s4, s76, 0xffff -; SI-NEXT: v_or_b32_e32 v13, v1, v28 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v31 -; SI-NEXT: v_writelane_b32 v62, s4, 8 -; SI-NEXT: s_and_b32 s4, s78, 0xffff -; SI-NEXT: v_or_b32_e32 v9, v1, v38 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v33 -; SI-NEXT: v_writelane_b32 v62, s4, 11 -; SI-NEXT: s_and_b32 s4, s88, 0xffff -; SI-NEXT: v_or_b32_e32 v6, v1, v39 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 -; SI-NEXT: v_writelane_b32 v62, s4, 14 -; SI-NEXT: s_bfe_u32 s4, s74, 0x80008 -; SI-NEXT: v_or_b32_e32 v4, v1, v48 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v36 -; SI-NEXT: v_writelane_b32 v62, s4, 4 -; SI-NEXT: s_bfe_u32 s4, s76, 0x80008 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v2, v1, v49 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v37 -; SI-NEXT: v_writelane_b32 v62, s4, 7 -; SI-NEXT: s_bfe_u32 s4, s78, 0x80008 -; SI-NEXT: v_or_b32_e32 v1, v1, v50 -; SI-NEXT: v_writelane_b32 v62, s4, 10 -; SI-NEXT: s_bfe_u32 s4, s88, 0x80008 -; SI-NEXT: s_lshr_b32 s68, s57, 8 -; SI-NEXT: s_lshr_b32 s65, s47, 8 -; SI-NEXT: s_lshr_b32 s54, s45, 8 -; SI-NEXT: s_lshr_b32 s51, s43, 8 -; SI-NEXT: s_lshr_b32 s48, s41, 8 -; SI-NEXT: s_lshr_b32 s37, s40, 8 -; SI-NEXT: s_lshr_b32 s34, s15, 8 -; SI-NEXT: s_lshr_b32 s95, s14, 8 -; SI-NEXT: s_lshr_b32 s92, s13, 8 -; SI-NEXT: s_lshr_b32 s89, s12, 8 -; SI-NEXT: s_and_b32 s71, s19, 0xffff -; SI-NEXT: s_and_b32 s69, s23, 0xffff -; SI-NEXT: s_and_b32 s66, s27, 0xffff -; SI-NEXT: s_and_b32 s55, s81, 0xffff -; SI-NEXT: s_and_b32 s52, s84, 0xffff -; SI-NEXT: s_and_b32 s49, s86, 0xffff -; SI-NEXT: s_and_b32 s38, s96, 0xffff -; SI-NEXT: s_and_b32 s35, s99, 0xffff -; SI-NEXT: s_and_b32 s30, s98, 0xffff -; SI-NEXT: s_and_b32 s93, s59, 0xffff -; SI-NEXT: 
s_and_b32 s90, s62, 0xffff -; SI-NEXT: s_bfe_u32 s83, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s80, s23, 0x80008 -; SI-NEXT: s_bfe_u32 s70, s27, 0x80008 -; SI-NEXT: s_bfe_u32 s67, s81, 0x80008 -; SI-NEXT: s_bfe_u32 s64, s84, 0x80008 -; SI-NEXT: s_bfe_u32 s53, s86, 0x80008 -; SI-NEXT: s_bfe_u32 s50, s96, 0x80008 -; SI-NEXT: s_bfe_u32 s39, s99, 0x80008 -; SI-NEXT: s_bfe_u32 s36, s98, 0x80008 -; SI-NEXT: s_bfe_u32 s31, s59, 0x80008 -; SI-NEXT: s_bfe_u32 s94, s62, 0x80008 -; SI-NEXT: s_bfe_u32 s91, s72, 0x80008 -; SI-NEXT: v_writelane_b32 v62, s4, 13 -; SI-NEXT: v_alignbit_b32 v45, s13, v8, 24 -; SI-NEXT: v_alignbit_b32 v47, s13, v8, 16 -; SI-NEXT: v_alignbit_b32 v57, s13, v8, 8 -; SI-NEXT: v_alignbit_b32 v41, s12, v5, 24 -; SI-NEXT: v_alignbit_b32 v43, s12, v5, 16 -; SI-NEXT: v_alignbit_b32 v44, s12, v5, 8 -; SI-NEXT: v_alignbit_b32 v21, s11, v13, 24 -; SI-NEXT: v_alignbit_b32 v22, s11, v13, 16 -; SI-NEXT: v_alignbit_b32 v24, s11, v13, 8 -; SI-NEXT: v_alignbit_b32 v17, s10, v9, 24 -; SI-NEXT: v_alignbit_b32 v18, s10, v9, 16 -; SI-NEXT: v_alignbit_b32 v20, s10, v9, 8 -; SI-NEXT: v_alignbit_b32 v59, s9, v6, 24 -; SI-NEXT: v_alignbit_b32 v60, s9, v6, 16 -; SI-NEXT: v_alignbit_b32 v61, s9, v6, 8 -; SI-NEXT: v_alignbit_b32 v46, s8, v4, 24 -; SI-NEXT: v_alignbit_b32 v56, s8, v4, 16 -; SI-NEXT: v_alignbit_b32 v58, s8, v4, 8 -; SI-NEXT: v_alignbit_b32 v55, s7, v2, 24 -; SI-NEXT: v_alignbit_b32 v40, s7, v2, 16 -; SI-NEXT: v_alignbit_b32 v42, s7, v2, 8 -; SI-NEXT: v_alignbit_b32 v52, s6, v1, 24 -; SI-NEXT: v_alignbit_b32 v53, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, s6, v1, 8 -; SI-NEXT: s_cbranch_execnz .LBB99_3 -; SI-NEXT: .LBB99_2: ; %cmp.true -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s88, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s79, s79, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s79, 0xffff -; SI-NEXT: s_lshl_b32 s5, s78, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 
s77, s77, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s77, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s75, s75, 3 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s75, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s73, s73, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s73, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s63, s63, 3 -; SI-NEXT: s_add_i32 s10, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s63, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s60, s60, 3 -; SI-NEXT: s_add_i32 s11, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s60, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s61, s61, 3 -; SI-NEXT: s_add_i32 s12, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s61, 0xffff -; SI-NEXT: s_lshl_b32 s5, s98, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s58, s58, 3 -; SI-NEXT: s_add_i32 s13, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s58, 0xffff -; SI-NEXT: s_lshl_b32 s5, s99, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s97, s97, 3 -; SI-NEXT: s_add_i32 s14, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s97, 0xffff -; SI-NEXT: s_lshl_b32 s5, s96, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s87, s87, 3 -; SI-NEXT: s_add_i32 s15, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s87, 0xffff -; SI-NEXT: s_lshl_b32 s5, s86, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s85, s85, 3 -; SI-NEXT: s_add_i32 s40, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s85, 0xffff -; SI-NEXT: s_lshl_b32 s5, s84, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s41, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s82, s82, 3 -; SI-NEXT: s_add_i32 s42, s4, 
0x30000 -; SI-NEXT: s_and_b32 s4, s82, 0xffff -; SI-NEXT: s_lshl_b32 s5, s81, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s43, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s44, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s45, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s46, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s47, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s56, s4, 0x30000 -; SI-NEXT: v_readlane_b32 s4, v62, 0 -; SI-NEXT: s_add_i32 s4, s4, 3 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s57, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v21, s56 -; SI-NEXT: v_alignbit_b32 v22, s57, v21, 24 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s57, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s57, v21, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v21, s46 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s47, v21, 24 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_alignbit_b32 v22, s47, v21, 16 -; SI-NEXT: v_alignbit_b32 v51, s47, v21, 8 -; SI-NEXT: v_mov_b32_e32 v21, s44 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s45, v21, 24 -; SI-NEXT: s_lshr_b32 s4, s11, 8 -; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s45, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s45, v21, 8 -; SI-NEXT: v_writelane_b32 v62, s4, 1 -; SI-NEXT: s_lshr_b32 s4, s10, 16 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v33 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v21, s42 -; SI-NEXT: v_writelane_b32 v62, s4, 2 -; SI-NEXT: s_lshr_b32 s4, s10, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v7 -; SI-NEXT: v_or_b32_e32 v3, v25, v3 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s43, v21, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 3 -; SI-NEXT: s_lshr_b32 s4, s9, 24 -; SI-NEXT: v_or_b32_e32 v5, v39, v5 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v3 -; SI-NEXT: v_mov_b32_e32 v3, s41 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, s43, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s43, v21, 8 -; SI-NEXT: v_writelane_b32 v62, s4, 4 -; SI-NEXT: s_lshr_b32 s4, s9, 16 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x30000, v5 -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v31 -; SI-NEXT: v_or_b32_e32 v7, v26, v7 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:220 ; 4-byte 
Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v3, v16, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 5 -; SI-NEXT: s_lshr_b32 s4, s9, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_add_i32_e32 v11, vcc, 3, v11 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v7 -; SI-NEXT: v_mov_b32_e32 v7, s40 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v3, v16, 16 -; SI-NEXT: v_alignbit_b32 v3, v3, v16, 8 -; SI-NEXT: v_writelane_b32 v62, s4, 6 -; SI-NEXT: s_lshr_b32 s4, s8, 24 -; SI-NEXT: v_or_b32_e32 v5, v38, v5 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v7, v14, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 7 -; SI-NEXT: s_lshr_b32 s4, s8, 16 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v27 -; SI-NEXT: v_or_b32_e32 v11, v29, v11 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v7, v14, 16 -; SI-NEXT: v_writelane_b32 v62, s4, 8 -; SI-NEXT: s_lshr_b32 s4, s8, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v15 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v11 -; SI-NEXT: v_mov_b32_e32 v11, s15 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v7, v14, 8 -; SI-NEXT: v_writelane_b32 v62, s4, 9 -; SI-NEXT: s_lshr_b32 s4, s7, 24 -; SI-NEXT: v_or_b32_e32 v5, v28, v5 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v11, v12, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 10 -; SI-NEXT: s_lshr_b32 s4, s7, 16 -; 
SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v36 -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v35 -; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v23 -; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v19 -; SI-NEXT: v_or_b32_e32 v10, v30, v10 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v11, v12, 16 -; SI-NEXT: v_writelane_b32 v62, s4, 11 -; SI-NEXT: s_lshr_b32 s4, s7, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v10 -; SI-NEXT: v_mov_b32_e32 v15, s14 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v11, v12, 8 -; SI-NEXT: v_writelane_b32 v62, s4, 12 -; SI-NEXT: s_lshr_b32 s4, s6, 24 -; SI-NEXT: v_or_b32_e32 v1, v50, v1 -; SI-NEXT: v_or_b32_e32 v2, v49, v2 -; SI-NEXT: v_or_b32_e32 v4, v48, v4 -; SI-NEXT: v_or_b32_e32 v5, v34, v5 -; SI-NEXT: v_or_b32_e32 v8, v32, v8 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v15, v10, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 13 -; SI-NEXT: s_lshr_b32 s4, s6, 16 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x30000, v1 -; SI-NEXT: v_mov_b32_e32 v37, s6 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x30000, v2 -; SI-NEXT: v_mov_b32_e32 v36, s7 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x30000, v4 -; SI-NEXT: v_mov_b32_e32 v35, s8 -; SI-NEXT: v_mov_b32_e32 v33, s9 -; SI-NEXT: v_mov_b32_e32 v20, s10 -; SI-NEXT: v_mov_b32_e32 v17, s11 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x30000, v5 -; SI-NEXT: v_mov_b32_e32 v18, s12 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v8 -; SI-NEXT: v_mov_b32_e32 v19, s13 -; SI-NEXT: 
buffer_store_dword v3, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v15, v10, 16 -; SI-NEXT: v_writelane_b32 v62, s4, 14 -; SI-NEXT: s_lshr_b32 s4, s6, 8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v3, v15, v10, 8 -; SI-NEXT: v_alignbit_b32 v45, v19, v8, 24 -; SI-NEXT: v_alignbit_b32 v47, v19, v8, 16 -; SI-NEXT: v_alignbit_b32 v57, v19, v8, 8 -; SI-NEXT: v_alignbit_b32 v41, v18, v5, 24 -; SI-NEXT: v_alignbit_b32 v43, v18, v5, 16 -; SI-NEXT: v_alignbit_b32 v44, v18, v5, 8 -; SI-NEXT: v_alignbit_b32 v21, v17, v13, 24 -; SI-NEXT: v_alignbit_b32 v22, v17, v13, 16 -; SI-NEXT: v_alignbit_b32 v24, v17, v13, 8 -; SI-NEXT: v_alignbit_b32 v17, v20, v9, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v9, 16 -; SI-NEXT: v_alignbit_b32 v20, v20, v9, 8 -; SI-NEXT: v_alignbit_b32 v59, v33, v6, 24 -; SI-NEXT: v_alignbit_b32 v60, v33, v6, 16 -; SI-NEXT: v_alignbit_b32 v61, v33, v6, 8 -; SI-NEXT: v_alignbit_b32 v46, v35, v4, 24 -; SI-NEXT: v_alignbit_b32 v56, v35, v4, 16 -; SI-NEXT: v_alignbit_b32 v58, v35, v4, 8 -; SI-NEXT: v_alignbit_b32 v55, v36, v2, 24 -; SI-NEXT: v_alignbit_b32 v40, v36, v2, 16 -; SI-NEXT: v_alignbit_b32 v42, v36, v2, 8 -; SI-NEXT: v_alignbit_b32 v52, v37, v1, 24 -; SI-NEXT: v_alignbit_b32 v53, v37, v1, 16 -; SI-NEXT: v_alignbit_b32 v54, v37, v1, 8 -; SI-NEXT: s_lshr_b32 s83, s57, 24 -; SI-NEXT: s_lshr_b32 s71, s57, 16 -; SI-NEXT: s_lshr_b32 s68, s57, 8 -; SI-NEXT: s_lshr_b32 s80, s47, 24 -; SI-NEXT: s_lshr_b32 s69, s47, 16 -; SI-NEXT: s_lshr_b32 s65, s47, 8 -; SI-NEXT: s_lshr_b32 s70, s45, 24 -; SI-NEXT: s_lshr_b32 s66, s45, 16 -; SI-NEXT: s_lshr_b32 s54, s45, 8 -; SI-NEXT: s_lshr_b32 s67, s43, 24 -; SI-NEXT: s_lshr_b32 s55, s43, 16 
-; SI-NEXT: s_lshr_b32 s51, s43, 8 -; SI-NEXT: s_lshr_b32 s64, s41, 24 -; SI-NEXT: s_lshr_b32 s52, s41, 16 -; SI-NEXT: s_lshr_b32 s48, s41, 8 -; SI-NEXT: s_lshr_b32 s53, s40, 24 -; SI-NEXT: s_lshr_b32 s49, s40, 16 -; SI-NEXT: s_lshr_b32 s37, s40, 8 -; SI-NEXT: s_lshr_b32 s50, s15, 24 -; SI-NEXT: s_lshr_b32 s38, s15, 16 -; SI-NEXT: s_lshr_b32 s34, s15, 8 -; SI-NEXT: s_lshr_b32 s39, s14, 24 -; SI-NEXT: s_lshr_b32 s35, s14, 16 -; SI-NEXT: s_lshr_b32 s95, s14, 8 -; SI-NEXT: s_lshr_b32 s36, s13, 24 -; SI-NEXT: s_lshr_b32 s30, s13, 16 -; SI-NEXT: s_lshr_b32 s92, s13, 8 -; SI-NEXT: s_lshr_b32 s31, s12, 24 -; SI-NEXT: s_lshr_b32 s93, s12, 16 -; SI-NEXT: s_lshr_b32 s89, s12, 8 -; SI-NEXT: s_lshr_b32 s94, s11, 24 -; SI-NEXT: s_lshr_b32 s90, s11, 16 -; SI-NEXT: s_lshr_b32 s91, s10, 24 -; SI-NEXT: v_writelane_b32 v62, s4, 15 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: .LBB99_3: ; %end -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload -; SI-NEXT: s_and_b32 s4, s56, 0xff -; SI-NEXT: s_lshl_b32 s5, s68, 8 -; SI-NEXT: s_lshl_b32 s16, s83, 24 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_readlane_b32 s99, v63, 35 -; SI-NEXT: v_readlane_b32 s98, v63, 34 -; SI-NEXT: v_readlane_b32 s97, v63, 33 -; SI-NEXT: v_readlane_b32 s96, v63, 32 -; SI-NEXT: v_readlane_b32 s87, v63, 31 -; SI-NEXT: v_readlane_b32 s86, v63, 30 -; SI-NEXT: v_readlane_b32 s85, v63, 29 -; SI-NEXT: v_readlane_b32 s84, v63, 28 -; SI-NEXT: v_readlane_b32 s83, v63, 27 -; SI-NEXT: v_readlane_b32 s82, v63, 26 -; SI-NEXT: v_readlane_b32 s81, v63, 25 -; SI-NEXT: v_readlane_b32 s68, v63, 20 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: 
v_or_b32_e32 v3, v7, v3 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s57, 0xff -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s71, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v3, v7, v3 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: s_and_b32 s4, s46, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v51 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s47, 0xff -; SI-NEXT: s_lshl_b32 s5, s65, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s69, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s80, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s54, 8 -; SI-NEXT: s_lshl_b32 s16, s70, 24 -; SI-NEXT: v_readlane_b32 s80, v63, 24 -; SI-NEXT: v_readlane_b32 s71, v63, 23 -; SI-NEXT: v_readlane_b32 s70, v63, 22 -; SI-NEXT: v_readlane_b32 s69, v63, 21 -; SI-NEXT: v_readlane_b32 s65, v63, 17 -; SI-NEXT: v_readlane_b32 s54, v63, 14 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: 
v_add_i32_e32 v7, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: s_and_b32 s4, s44, 0xff -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s45, 0xff -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s66, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: s_and_b32 s4, s42, 0xff -; SI-NEXT: s_lshl_b32 s5, s51, 8 -; SI-NEXT: s_lshl_b32 s16, s67, 24 -; SI-NEXT: v_readlane_b32 s67, v63, 19 -; SI-NEXT: v_readlane_b32 s66, v63, 18 -; SI-NEXT: v_readlane_b32 s51, v63, 11 -; 
SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s43, 0xff -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s55, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xff, v16 -; SI-NEXT: s_and_b32 s4, s41, 0xff -; SI-NEXT: s_lshl_b32 s5, s48, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s52, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s64, 24 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s37, 8 -; SI-NEXT: s_lshl_b32 s16, s53, 24 -; SI-NEXT: v_readlane_b32 s64, v63, 16 -; SI-NEXT: v_readlane_b32 s55, v63, 15 -; SI-NEXT: v_readlane_b32 s53, v63, 13 -; SI-NEXT: v_readlane_b32 s52, v63, 12 -; SI-NEXT: v_readlane_b32 s48, v63, 8 -; SI-NEXT: v_readlane_b32 s37, v63, 5 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; 
SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xff, v14 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s49, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s34, 8 -; SI-NEXT: v_readlane_b32 s49, v63, 9 -; SI-NEXT: v_readlane_b32 s34, v63, 2 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:68 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:48 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_writelane_b32 v40, s30, 0 +; SI-NEXT: v_writelane_b32 v40, s31, 1 +; SI-NEXT: v_writelane_b32 v40, s34, 2 +; 
SI-NEXT: v_writelane_b32 v40, s35, 3 +; SI-NEXT: v_writelane_b32 v40, s36, 4 +; SI-NEXT: v_writelane_b32 v40, s37, 5 +; SI-NEXT: v_writelane_b32 v40, s38, 6 +; SI-NEXT: v_writelane_b32 v40, s39, 7 +; SI-NEXT: v_writelane_b32 v40, s48, 8 +; SI-NEXT: v_writelane_b32 v40, s49, 9 +; SI-NEXT: v_writelane_b32 v40, s50, 10 +; SI-NEXT: v_writelane_b32 v40, s51, 11 +; SI-NEXT: v_writelane_b32 v40, s52, 12 +; SI-NEXT: v_writelane_b32 v40, s53, 13 +; SI-NEXT: v_writelane_b32 v40, s54, 14 +; SI-NEXT: v_writelane_b32 v40, s55, 15 +; SI-NEXT: v_writelane_b32 v40, s64, 16 +; SI-NEXT: v_writelane_b32 v40, s65, 17 +; SI-NEXT: v_writelane_b32 v40, s66, 18 +; SI-NEXT: v_writelane_b32 v40, s67, 19 +; SI-NEXT: v_writelane_b32 v40, s68, 20 +; SI-NEXT: v_writelane_b32 v40, s69, 21 +; SI-NEXT: v_writelane_b32 v40, s70, 22 +; SI-NEXT: s_mov_b32 s88, s17 +; SI-NEXT: v_writelane_b32 v40, s71, 23 +; SI-NEXT: v_writelane_b32 v40, s80, 24 +; SI-NEXT: v_writelane_b32 v40, s81, 25 +; SI-NEXT: v_writelane_b32 v40, s82, 26 +; SI-NEXT: v_writelane_b32 v40, s83, 27 +; SI-NEXT: v_readfirstlane_b32 s6, v16 +; SI-NEXT: ; implicit-def: $vgpr41 : SGPR spill to VGPR lane +; SI-NEXT: v_readfirstlane_b32 s7, v15 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_writelane_b32 v41, s6, 0 +; SI-NEXT: v_readfirstlane_b32 s8, v21 +; SI-NEXT: v_writelane_b32 v41, s7, 1 +; SI-NEXT: v_readfirstlane_b32 s9, v20 +; SI-NEXT: v_writelane_b32 v41, s8, 2 +; SI-NEXT: v_readfirstlane_b32 s10, v19 +; SI-NEXT: v_writelane_b32 v41, s9, 3 +; SI-NEXT: v_readfirstlane_b32 s11, v25 +; SI-NEXT: v_writelane_b32 v41, s10, 4 +; SI-NEXT: v_readfirstlane_b32 s12, v24 +; SI-NEXT: v_writelane_b32 v41, s11, 5 +; SI-NEXT: v_readfirstlane_b32 s13, v23 +; SI-NEXT: v_writelane_b32 v41, s12, 6 +; SI-NEXT: v_readfirstlane_b32 s15, v29 +; SI-NEXT: v_writelane_b32 v41, s13, 7 +; SI-NEXT: v_readfirstlane_b32 s14, v28 +; SI-NEXT: v_writelane_b32 v41, s15, 8 +; SI-NEXT: s_mov_b32 s79, s16 +; SI-NEXT: v_readfirstlane_b32 s16, v27 +; SI-NEXT: 
v_writelane_b32 v41, s14, 9 +; SI-NEXT: v_writelane_b32 v41, s16, 10 +; SI-NEXT: v_writelane_b32 v40, s84, 28 +; SI-NEXT: v_writelane_b32 v40, s85, 29 +; SI-NEXT: v_writelane_b32 v40, s86, 30 +; SI-NEXT: v_writelane_b32 v40, s87, 31 +; SI-NEXT: v_writelane_b32 v40, s96, 32 +; SI-NEXT: v_writelane_b32 v40, s97, 33 +; SI-NEXT: v_writelane_b32 v40, s98, 34 +; SI-NEXT: v_writelane_b32 v40, s99, 35 +; SI-NEXT: v_readfirstlane_b32 s98, v30 +; SI-NEXT: v_readfirstlane_b32 s97, v26 +; SI-NEXT: v_readfirstlane_b32 s96, v22 +; SI-NEXT: v_readfirstlane_b32 s87, v18 +; SI-NEXT: v_readfirstlane_b32 s81, v17 +; SI-NEXT: v_readfirstlane_b32 s86, v14 +; SI-NEXT: v_readfirstlane_b32 s67, v13 +; SI-NEXT: v_readfirstlane_b32 s69, v12 +; SI-NEXT: v_readfirstlane_b32 s71, v11 +; SI-NEXT: v_readfirstlane_b32 s85, v10 +; SI-NEXT: v_readfirstlane_b32 s51, v9 +; SI-NEXT: v_readfirstlane_b32 s53, v8 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s89, v31 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s91, v32 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_readfirstlane_b32 s93, v33 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s55, v34 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s17, v35 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s95, v36 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s35, v37 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], 
s32 offset:8 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s83, v38 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:80 +; SI-NEXT: v_readfirstlane_b32 s65, v7 +; SI-NEXT: v_readfirstlane_b32 s84, v6 +; SI-NEXT: v_readfirstlane_b32 s31, v5 +; SI-NEXT: v_readfirstlane_b32 s37, v4 +; SI-NEXT: v_readfirstlane_b32 s49, v3 +; SI-NEXT: v_readfirstlane_b32 s78, v2 +; SI-NEXT: v_readfirstlane_b32 s39, v1 +; SI-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane +; SI-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_readfirstlane_b32 s77, v31 +; SI-NEXT: s_waitcnt vmcnt(11) +; SI-NEXT: v_readfirstlane_b32 s38, v32 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_readfirstlane_b32 s48, v33 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_readfirstlane_b32 s50, v39 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_readfirstlane_b32 s76, v48 +; SI-NEXT: s_waitcnt vmcnt(7) +; SI-NEXT: v_readfirstlane_b32 s30, v49 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_readfirstlane_b32 s34, v50 +; SI-NEXT: s_waitcnt vmcnt(5) +; SI-NEXT: v_readfirstlane_b32 s36, v51 +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_readfirstlane_b32 s99, v34 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_readfirstlane_b32 s90, v35 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_readfirstlane_b32 s92, v36 +; SI-NEXT: v_writelane_b32 v41, s90, 11 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; SI-NEXT: v_readfirstlane_b32 s94, v37 +; SI-NEXT: v_writelane_b32 v41, s92, 12 +; SI-NEXT: v_writelane_b32 v41, s94, 13 +; SI-NEXT: v_writelane_b32 v41, s30, 14 +; SI-NEXT: v_writelane_b32 v41, s34, 15 +; SI-NEXT: v_writelane_b32 v41, s36, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, 
v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xff, v12 -; SI-NEXT: s_and_b32 s4, s15, 0xff +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v38 +; SI-NEXT: v_writelane_b32 v41, s38, 17 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_writelane_b32 v41, s48, 18 +; SI-NEXT: v_writelane_b32 v41, s50, 19 +; SI-NEXT: s_cbranch_scc0 .LBB99_4 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: s_and_b32 s4, s79, 0xffff +; SI-NEXT: s_lshl_b32 s5, s88, 16 +; SI-NEXT: s_or_b32 s60, s4, s5 +; SI-NEXT: s_and_b32 s4, s18, 0xffff +; SI-NEXT: s_lshl_b32 s5, s19, 16 +; SI-NEXT: s_or_b32 s61, s4, s5 +; SI-NEXT: s_and_b32 s4, s20, 0xffff +; SI-NEXT: s_lshl_b32 s5, s21, 16 +; SI-NEXT: s_or_b32 s56, s4, s5 +; SI-NEXT: s_and_b32 s4, s22, 0xffff +; SI-NEXT: s_lshl_b32 s5, s23, 16 +; SI-NEXT: s_or_b32 s57, s4, s5 +; SI-NEXT: s_and_b32 s4, s24, 0xffff +; SI-NEXT: s_lshl_b32 s5, s25, 16 +; SI-NEXT: s_or_b32 s44, s4, s5 +; SI-NEXT: s_and_b32 s4, s26, 0xffff +; SI-NEXT: s_lshl_b32 s5, s27, 16 +; SI-NEXT: s_or_b32 s45, s4, s5 +; SI-NEXT: s_and_b32 s4, s28, 0xffff +; SI-NEXT: s_lshl_b32 s5, s29, 16 +; SI-NEXT: s_or_b32 s74, s4, s5 +; SI-NEXT: s_and_b32 s4, s39, 0xffff +; SI-NEXT: s_lshl_b32 s5, s78, 16 +; SI-NEXT: s_or_b32 s75, s4, s5 +; SI-NEXT: s_and_b32 s4, s49, 0xffff +; SI-NEXT: s_lshl_b32 s5, s37, 16 +; SI-NEXT: s_or_b32 s72, s4, s5 +; SI-NEXT: s_and_b32 s4, s31, 0xffff +; SI-NEXT: s_lshl_b32 s5, s84, 16 +; SI-NEXT: s_or_b32 s73, s4, s5 +; SI-NEXT: s_and_b32 s4, s65, 0xffff +; SI-NEXT: s_lshl_b32 s5, s53, 16 +; SI-NEXT: s_or_b32 s62, s4, s5 +; SI-NEXT: s_and_b32 s4, s51, 0xffff +; SI-NEXT: s_lshl_b32 s5, s85, 16 +; SI-NEXT: s_or_b32 s63, s4, 
s5 +; SI-NEXT: s_and_b32 s4, s71, 0xffff +; SI-NEXT: s_lshl_b32 s5, s69, 16 +; SI-NEXT: s_or_b32 s58, s4, s5 +; SI-NEXT: s_and_b32 s4, s67, 0xffff +; SI-NEXT: s_lshl_b32 s5, s86, 16 +; SI-NEXT: s_or_b32 s59, s4, s5 +; SI-NEXT: s_and_b32 s4, s7, 0xffff +; SI-NEXT: s_lshl_b32 s5, s6, 16 +; SI-NEXT: s_or_b32 s46, s4, s5 +; SI-NEXT: s_and_b32 s4, s81, 0xffff +; SI-NEXT: s_lshl_b32 s5, s87, 16 +; SI-NEXT: s_or_b32 s47, s4, s5 +; SI-NEXT: s_and_b32 s4, s10, 0xffff +; SI-NEXT: s_lshl_b32 s5, s9, 16 +; SI-NEXT: s_or_b32 s42, s4, s5 +; SI-NEXT: s_and_b32 s4, s8, 0xffff +; SI-NEXT: s_lshl_b32 s5, s96, 16 +; SI-NEXT: s_or_b32 s43, s4, s5 +; SI-NEXT: s_and_b32 s4, s13, 0xffff +; SI-NEXT: s_lshl_b32 s5, s12, 16 +; SI-NEXT: s_or_b32 s40, s4, s5 +; SI-NEXT: s_and_b32 s4, s11, 0xffff +; SI-NEXT: s_lshl_b32 s5, s97, 16 +; SI-NEXT: s_or_b32 s41, s4, s5 +; SI-NEXT: s_and_b32 s4, s16, 0xffff +; SI-NEXT: s_lshl_b32 s5, s14, 16 +; SI-NEXT: s_or_b32 s14, s4, s5 +; SI-NEXT: s_and_b32 s4, s15, 0xffff +; SI-NEXT: s_lshl_b32 s5, s98, 16 +; SI-NEXT: s_or_b32 s15, s4, s5 +; SI-NEXT: s_and_b32 s4, s94, 0xffff +; SI-NEXT: s_lshl_b32 s5, s92, 16 +; SI-NEXT: s_or_b32 s12, s4, s5 +; SI-NEXT: s_and_b32 s4, s90, 0xffff +; SI-NEXT: s_lshl_b32 s5, s99, 16 +; SI-NEXT: s_or_b32 s13, s4, s5 +; SI-NEXT: s_and_b32 s4, s36, 0xffff +; SI-NEXT: s_lshl_b32 s5, s34, 16 +; SI-NEXT: s_or_b32 s10, s4, s5 +; SI-NEXT: s_and_b32 s4, s30, 0xffff +; SI-NEXT: s_lshl_b32 s5, s76, 16 +; SI-NEXT: s_or_b32 s11, s4, s5 +; SI-NEXT: s_and_b32 s4, s50, 0xffff +; SI-NEXT: s_lshl_b32 s5, s48, 16 +; SI-NEXT: s_or_b32 s8, s4, s5 +; SI-NEXT: s_and_b32 s4, s38, 0xffff +; SI-NEXT: s_lshl_b32 s5, s77, 16 +; SI-NEXT: s_or_b32 s9, s4, s5 +; SI-NEXT: s_and_b32 s4, s83, 0xffff +; SI-NEXT: s_lshl_b32 s5, s35, 16 +; SI-NEXT: s_or_b32 s6, s4, s5 +; SI-NEXT: s_and_b32 s4, s95, 0xffff +; SI-NEXT: s_lshl_b32 s5, s17, 16 +; SI-NEXT: s_or_b32 s7, s4, s5 +; SI-NEXT: s_and_b32 s4, s55, 0xffff +; SI-NEXT: s_lshl_b32 s5, s93, 16 ; SI-NEXT: s_or_b32 s4, 
s4, s5 -; SI-NEXT: s_and_b32 s5, s38, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s15, s50, 24 +; SI-NEXT: s_and_b32 s5, s91, 0xffff +; SI-NEXT: s_lshl_b32 s16, s89, 16 +; SI-NEXT: s_or_b32 s5, s5, s16 +; SI-NEXT: s_lshr_b32 s16, s61, 8 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v43, s16, 20 +; SI-NEXT: s_lshr_b32 s16, s57, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 23 +; SI-NEXT: s_lshr_b32 s16, s45, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 26 +; SI-NEXT: s_lshr_b32 s16, s75, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 29 +; SI-NEXT: s_lshr_b32 s16, s73, 8 +; SI-NEXT: s_lshr_b64 vcc, s[60:61], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 32 +; SI-NEXT: s_lshr_b32 s16, s63, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 22 +; SI-NEXT: v_writelane_b32 v43, s16, 35 +; SI-NEXT: s_lshr_b32 s16, s59, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 23 +; SI-NEXT: s_lshr_b64 vcc, s[60:61], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 38 +; SI-NEXT: s_lshr_b32 s16, s47, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 20 +; SI-NEXT: v_writelane_b32 v43, s16, 41 +; SI-NEXT: s_lshr_b32 s16, s43, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 21 +; SI-NEXT: s_lshr_b64 vcc, s[56:57], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 44 +; SI-NEXT: s_lshr_b32 s16, s41, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 28 +; SI-NEXT: v_writelane_b32 v43, s16, 47 +; SI-NEXT: s_lshr_b32 s16, s15, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 29 +; SI-NEXT: s_lshr_b64 vcc, s[56:57], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 50 +; SI-NEXT: s_lshr_b32 s16, s13, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 26 +; SI-NEXT: v_writelane_b32 v43, s16, 53 +; SI-NEXT: s_lshr_b32 s16, s11, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 27 +; SI-NEXT: s_lshr_b64 vcc, s[56:57], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 56 +; SI-NEXT: s_lshr_b32 s16, s9, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 59 +; SI-NEXT: s_lshr_b32 s16, s7, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 25 +; 
SI-NEXT: s_lshr_b64 vcc, s[46:47], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 62 +; SI-NEXT: s_lshr_b32 s16, s5, 8 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 32 +; SI-NEXT: v_writelane_b32 v42, s16, 1 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 33 +; SI-NEXT: s_lshr_b64 vcc, s[46:47], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 19 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 30 +; SI-NEXT: v_writelane_b32 v43, s16, 22 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 31 +; SI-NEXT: s_lshr_b64 vcc, s[42:43], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 25 +; SI-NEXT: s_and_b32 s16, s78, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 38 +; SI-NEXT: v_writelane_b32 v43, s16, 28 +; SI-NEXT: s_and_b32 s16, s84, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 39 +; SI-NEXT: s_lshr_b64 vcc, s[42:43], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 31 +; SI-NEXT: s_and_b32 s16, s85, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 36 +; SI-NEXT: v_writelane_b32 v43, s16, 34 +; SI-NEXT: s_and_b32 s16, s86, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 37 +; SI-NEXT: s_lshr_b64 vcc, s[42:43], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 37 +; SI-NEXT: s_and_b32 s16, s87, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 34 +; SI-NEXT: v_writelane_b32 v43, s16, 40 +; SI-NEXT: s_and_b32 s16, s96, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 35 +; SI-NEXT: s_lshr_b64 vcc, s[40:41], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 43 +; SI-NEXT: s_and_b32 s16, s97, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 44 +; SI-NEXT: v_writelane_b32 v43, s16, 46 +; SI-NEXT: s_and_b32 s16, s98, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 45 +; SI-NEXT: s_lshr_b64 vcc, s[40:41], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 49 +; SI-NEXT: s_and_b32 s16, s99, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 42 +; SI-NEXT: v_writelane_b32 v43, s16, 52 +; SI-NEXT: s_and_b32 s16, s76, 0xffff +; SI-NEXT: v_writelane_b32 
v41, vcc_hi, 43 +; SI-NEXT: s_lshr_b64 vcc, s[40:41], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 55 +; SI-NEXT: s_and_b32 s16, s77, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 40 +; SI-NEXT: v_writelane_b32 v43, s16, 58 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 41 +; SI-NEXT: s_lshr_b64 vcc, s[14:15], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 61 +; SI-NEXT: s_and_b32 s16, s89, 0xffff +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 50 +; SI-NEXT: v_writelane_b32 v42, s16, 0 +; SI-NEXT: s_bfe_u32 s16, s19, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 51 +; SI-NEXT: s_lshr_b64 vcc, s[14:15], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 18 +; SI-NEXT: s_bfe_u32 s16, s23, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 48 +; SI-NEXT: v_writelane_b32 v43, s16, 21 +; SI-NEXT: s_bfe_u32 s16, s27, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 49 +; SI-NEXT: s_lshr_b64 vcc, s[14:15], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 24 +; SI-NEXT: s_bfe_u32 s16, s78, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 46 +; SI-NEXT: v_writelane_b32 v43, s16, 27 +; SI-NEXT: s_bfe_u32 s16, s84, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 47 +; SI-NEXT: s_lshr_b64 vcc, s[12:13], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 30 +; SI-NEXT: s_bfe_u32 s16, s85, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 56 +; SI-NEXT: v_writelane_b32 v43, s16, 33 +; SI-NEXT: s_bfe_u32 s16, s86, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 57 +; SI-NEXT: s_lshr_b64 vcc, s[12:13], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 36 +; SI-NEXT: s_bfe_u32 s16, s87, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 54 +; SI-NEXT: v_writelane_b32 v43, s16, 39 +; SI-NEXT: s_bfe_u32 s16, s96, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 55 +; SI-NEXT: s_lshr_b64 vcc, s[12:13], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 42 +; SI-NEXT: s_bfe_u32 s16, s97, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 52 +; SI-NEXT: v_writelane_b32 v43, s16, 45 +; SI-NEXT: s_bfe_u32 s16, s98, 
0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 53 +; SI-NEXT: s_lshr_b64 vcc, s[10:11], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 48 +; SI-NEXT: s_bfe_u32 s16, s99, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 62 +; SI-NEXT: v_writelane_b32 v43, s16, 51 +; SI-NEXT: s_bfe_u32 s16, s76, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 63 +; SI-NEXT: s_lshr_b64 vcc, s[10:11], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 54 +; SI-NEXT: s_bfe_u32 s16, s77, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 60 +; SI-NEXT: v_writelane_b32 v43, s16, 57 +; SI-NEXT: s_bfe_u32 s16, s17, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 61 +; SI-NEXT: s_lshr_b64 vcc, s[10:11], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 60 +; SI-NEXT: s_bfe_u32 s16, s89, 0x80008 +; SI-NEXT: v_writelane_b32 v41, vcc_lo, 58 +; SI-NEXT: v_writelane_b32 v43, s16, 63 +; SI-NEXT: v_writelane_b32 v41, vcc_hi, 59 +; SI-NEXT: s_lshr_b64 vcc, s[8:9], 24 +; SI-NEXT: s_mov_b32 s16, s93 +; SI-NEXT: s_lshr_b64 s[92:93], s[60:61], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 4 +; SI-NEXT: s_mov_b32 s93, s16 +; SI-NEXT: s_mov_b32 s16, s71 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 5 +; SI-NEXT: s_lshr_b64 vcc, s[8:9], 16 +; SI-NEXT: s_mov_b32 s71, s16 +; SI-NEXT: s_mov_b32 s16, s81 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 2 +; SI-NEXT: s_mov_b32 s81, s16 +; SI-NEXT: s_mov_b32 s16, s83 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 3 +; SI-NEXT: s_lshr_b64 vcc, s[8:9], 8 +; SI-NEXT: s_mov_b32 s83, s16 +; SI-NEXT: s_mov_b32 s16, s65 +; SI-NEXT: s_lshr_b64 s[64:65], s[74:75], 24 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 0 +; SI-NEXT: s_mov_b32 s65, s16 +; SI-NEXT: s_mov_b32 s16, s67 +; SI-NEXT: s_lshr_b64 s[66:67], s[74:75], 16 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 1 +; SI-NEXT: s_lshr_b64 vcc, s[6:7], 24 +; SI-NEXT: s_mov_b32 s67, s16 +; SI-NEXT: s_mov_b32 s16, s69 +; SI-NEXT: s_lshr_b64 
s[68:69], s[74:75], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 10 +; SI-NEXT: s_mov_b32 s69, s16 +; SI-NEXT: s_mov_b32 s16, s51 +; SI-NEXT: s_lshr_b64 s[50:51], s[72:73], 24 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 11 +; SI-NEXT: s_lshr_b64 vcc, s[6:7], 16 +; SI-NEXT: s_mov_b32 s51, s16 +; SI-NEXT: s_mov_b32 s16, s53 +; SI-NEXT: s_lshr_b64 s[52:53], s[72:73], 16 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 8 +; SI-NEXT: s_mov_b32 s53, s16 +; SI-NEXT: s_mov_b32 s16, s55 +; SI-NEXT: s_lshr_b64 s[54:55], s[72:73], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 9 +; SI-NEXT: s_lshr_b64 vcc, s[6:7], 8 +; SI-NEXT: s_mov_b32 s55, s16 +; SI-NEXT: s_mov_b32 s16, s37 +; SI-NEXT: s_lshr_b64 s[36:37], s[62:63], 24 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 6 +; SI-NEXT: s_mov_b32 s37, s16 +; SI-NEXT: s_mov_b32 s16, s39 +; SI-NEXT: s_lshr_b64 s[38:39], s[62:63], 16 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 7 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 24 +; SI-NEXT: s_mov_b32 s39, s16 +; SI-NEXT: s_mov_b32 s16, s49 +; SI-NEXT: s_lshr_b64 s[48:49], s[62:63], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 16 +; SI-NEXT: s_mov_b32 s49, s16 +; SI-NEXT: s_mov_b32 s16, s95 +; SI-NEXT: s_lshr_b64 s[94:95], s[58:59], 24 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 17 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 16 +; SI-NEXT: s_mov_b32 s95, s16 +; SI-NEXT: s_mov_b32 s16, s31 +; SI-NEXT: s_lshr_b64 s[30:31], s[58:59], 16 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 14 +; SI-NEXT: s_mov_b32 s31, s16 +; SI-NEXT: s_mov_b32 s16, s35 +; SI-NEXT: s_lshr_b64 s[34:35], s[58:59], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 15 +; SI-NEXT: s_lshr_b64 vcc, s[4:5], 8 +; SI-NEXT: s_mov_b32 s35, s16 +; SI-NEXT: s_mov_b32 s16, s91 +; SI-NEXT: s_lshr_b64 s[90:91], s[46:47], 8 +; SI-NEXT: v_writelane_b32 v43, vcc_lo, 12 +; SI-NEXT: s_mov_b32 s91, s16 +; SI-NEXT: v_writelane_b32 v43, vcc_hi, 13 +; SI-NEXT: s_cbranch_execnz .LBB99_3 +; SI-NEXT: .LBB99_2: ; %cmp.true +; SI-NEXT: s_add_i32 s4, s55, 3 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: 
s_or_b32 s5, s15, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s95, 8 -; SI-NEXT: v_readlane_b32 s50, v63, 10 -; SI-NEXT: v_readlane_b32 s38, v63, 6 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s5, s93, 16 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_add_i32 s5, s91, 3 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_lshl_b32 s6, s89, 16 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_add_i32 s6, s83, 3 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s7, s35, 16 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_add_i32 s7, s95, 3 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_lshl_b32 s8, s17, 16 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_readlane_b32 s8, v41, 19 +; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: v_readlane_b32 s9, v41, 18 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_readlane_b32 s9, v41, 17 +; SI-NEXT: s_add_i32 s9, s9, 3 +; SI-NEXT: s_and_b32 s9, s9, 0xffff +; SI-NEXT: s_lshl_b32 s10, s77, 16 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_readlane_b32 s10, v41, 16 +; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: v_readlane_b32 s11, v41, 15 +; SI-NEXT: 
s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_readlane_b32 s11, v41, 14 +; SI-NEXT: s_add_i32 s11, s11, 3 +; SI-NEXT: s_and_b32 s11, s11, 0xffff +; SI-NEXT: s_lshl_b32 s12, s76, 16 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_readlane_b32 s12, v41, 13 +; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: v_readlane_b32 s13, v41, 12 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_readlane_b32 s13, v41, 11 +; SI-NEXT: s_add_i32 s13, s13, 3 +; SI-NEXT: s_and_b32 s13, s13, 0xffff +; SI-NEXT: s_lshl_b32 s14, s99, 16 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_readlane_b32 s14, v41, 10 +; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: v_readlane_b32 s15, v41, 9 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_readlane_b32 s15, v41, 8 +; SI-NEXT: s_add_i32 s15, s15, 3 +; SI-NEXT: s_and_b32 s15, s15, 0xffff +; SI-NEXT: s_lshl_b32 s16, s98, 16 +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_readlane_b32 s16, v41, 7 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v41, 6 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s40, s16, 0x30000 +; SI-NEXT: v_readlane_b32 s16, v41, 5 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s97, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s41, s16, 0x30000 +; SI-NEXT: v_readlane_b32 s16, v41, 4 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v41, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s42, s16, 0x30000 +; SI-NEXT: v_readlane_b32 s16, v41, 2 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s96, 16 +; 
SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s43, s16, 0x30000 +; SI-NEXT: v_readlane_b32 s16, v41, 1 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: v_readlane_b32 s17, v41, 0 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s46, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s81, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s87, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s47, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s71, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s69, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s58, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s67, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s86, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s59, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s65, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s62, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s51, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s85, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s63, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s49, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s37, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s72, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s31, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s84, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s73, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s28, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s29, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s74, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s39, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s75, s16, 0x30000 +; 
SI-NEXT: s_add_i32 s16, s24, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s25, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s44, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s26, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s27, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s45, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s20, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s21, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s56, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s22, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s23, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s57, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s79, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s60, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s18, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s19, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s61, s16, 0x30000 +; SI-NEXT: s_lshr_b64 s[16:17], s[60:61], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 22 +; SI-NEXT: v_writelane_b32 v41, s17, 23 +; SI-NEXT: s_lshr_b64 s[16:17], s[60:61], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 20 +; SI-NEXT: v_writelane_b32 v41, s17, 21 +; SI-NEXT: s_lshr_b32 s16, s61, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 18 +; SI-NEXT: s_lshr_b32 s16, s61, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 19 +; SI-NEXT: s_lshr_b32 s16, s61, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 20 +; SI-NEXT: s_lshr_b32 s16, s57, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 21 +; SI-NEXT: s_lshr_b32 s16, s57, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 22 +; SI-NEXT: s_lshr_b32 s16, s57, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 23 +; SI-NEXT: s_lshr_b32 s16, s45, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 24 +; SI-NEXT: s_lshr_b32 s16, s45, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 25 +; SI-NEXT: s_lshr_b32 s16, 
s45, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 26 +; SI-NEXT: s_lshr_b32 s16, s75, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 27 +; SI-NEXT: s_lshr_b32 s16, s75, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 28 +; SI-NEXT: s_lshr_b32 s16, s75, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 29 +; SI-NEXT: s_lshr_b32 s16, s73, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 30 +; SI-NEXT: s_lshr_b32 s16, s73, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 31 +; SI-NEXT: s_lshr_b32 s16, s73, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 32 +; SI-NEXT: s_lshr_b32 s16, s63, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 33 +; SI-NEXT: s_lshr_b32 s16, s63, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 34 +; SI-NEXT: s_lshr_b32 s16, s63, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 35 +; SI-NEXT: s_lshr_b32 s16, s59, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 36 +; SI-NEXT: s_lshr_b32 s16, s59, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 37 +; SI-NEXT: s_lshr_b32 s16, s59, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 38 +; SI-NEXT: s_lshr_b32 s16, s47, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 39 +; SI-NEXT: s_lshr_b32 s16, s47, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 40 +; SI-NEXT: s_lshr_b32 s16, s47, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 41 +; SI-NEXT: s_lshr_b32 s16, s43, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 42 +; SI-NEXT: s_lshr_b32 s16, s43, 16 +; SI-NEXT: v_writelane_b32 v43, s16, 43 +; SI-NEXT: s_lshr_b32 s16, s43, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 44 +; SI-NEXT: s_lshr_b32 s16, s41, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 45 +; SI-NEXT: s_lshr_b32 s16, s41, 16 +; SI-NEXT: s_add_i32 s15, s15, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 46 +; SI-NEXT: s_lshr_b32 s16, s41, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 47 +; SI-NEXT: s_lshr_b32 s16, s15, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 48 +; SI-NEXT: s_lshr_b32 s16, s15, 16 +; SI-NEXT: s_add_i32 s13, s13, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 49 +; SI-NEXT: s_lshr_b32 s16, s15, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 50 +; SI-NEXT: s_lshr_b32 s16, 
s13, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 51 +; SI-NEXT: s_lshr_b32 s16, s13, 16 +; SI-NEXT: s_add_i32 s11, s11, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 52 +; SI-NEXT: s_lshr_b32 s16, s13, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 53 +; SI-NEXT: s_lshr_b32 s16, s11, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 54 +; SI-NEXT: s_lshr_b32 s16, s11, 16 +; SI-NEXT: s_add_i32 s9, s9, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 55 +; SI-NEXT: s_lshr_b32 s16, s11, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 56 +; SI-NEXT: s_lshr_b32 s16, s9, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 57 +; SI-NEXT: s_lshr_b32 s16, s9, 16 +; SI-NEXT: s_add_i32 s7, s7, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 58 +; SI-NEXT: s_lshr_b32 s16, s9, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 59 +; SI-NEXT: s_lshr_b32 s16, s7, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 60 +; SI-NEXT: s_lshr_b32 s16, s7, 16 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 61 +; SI-NEXT: s_lshr_b32 s16, s7, 8 +; SI-NEXT: v_writelane_b32 v43, s16, 62 +; SI-NEXT: s_lshr_b32 s16, s5, 24 +; SI-NEXT: v_writelane_b32 v43, s16, 63 +; SI-NEXT: s_lshr_b32 s16, s5, 16 +; SI-NEXT: v_writelane_b32 v42, s16, 0 +; SI-NEXT: s_lshr_b32 s16, s5, 8 +; SI-NEXT: v_writelane_b32 v42, s16, 1 +; SI-NEXT: s_lshr_b64 s[16:17], s[56:57], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 28 +; SI-NEXT: v_writelane_b32 v41, s17, 29 +; SI-NEXT: s_lshr_b64 s[16:17], s[56:57], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 26 +; SI-NEXT: v_writelane_b32 v41, s17, 27 +; SI-NEXT: s_lshr_b64 s[16:17], s[56:57], 8 +; SI-NEXT: v_writelane_b32 v41, s16, 24 +; SI-NEXT: v_writelane_b32 v41, s17, 25 +; SI-NEXT: s_lshr_b64 s[16:17], s[46:47], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 32 +; SI-NEXT: v_writelane_b32 v41, s17, 33 +; SI-NEXT: s_lshr_b64 s[16:17], s[46:47], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 30 +; SI-NEXT: v_writelane_b32 v41, s17, 31 +; SI-NEXT: s_lshr_b64 s[16:17], s[42:43], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 38 +; 
SI-NEXT: v_writelane_b32 v41, s17, 39 +; SI-NEXT: s_lshr_b64 s[16:17], s[42:43], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 36 +; SI-NEXT: v_writelane_b32 v41, s17, 37 +; SI-NEXT: s_lshr_b64 s[16:17], s[42:43], 8 +; SI-NEXT: v_writelane_b32 v41, s16, 34 +; SI-NEXT: v_writelane_b32 v41, s17, 35 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 44 +; SI-NEXT: v_writelane_b32 v41, s17, 45 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 42 +; SI-NEXT: v_writelane_b32 v41, s17, 43 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 8 +; SI-NEXT: s_add_i32 s14, s14, 0x30000 +; SI-NEXT: v_writelane_b32 v41, s16, 40 +; SI-NEXT: v_writelane_b32 v41, s17, 41 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 50 +; SI-NEXT: v_writelane_b32 v41, s17, 51 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 48 +; SI-NEXT: v_writelane_b32 v41, s17, 49 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 8 +; SI-NEXT: s_add_i32 s12, s12, 0x30000 +; SI-NEXT: v_writelane_b32 v41, s16, 46 +; SI-NEXT: v_writelane_b32 v41, s17, 47 +; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 56 +; SI-NEXT: v_writelane_b32 v41, s17, 57 +; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 54 +; SI-NEXT: v_writelane_b32 v41, s17, 55 +; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 8 +; SI-NEXT: s_add_i32 s10, s10, 0x30000 +; SI-NEXT: v_writelane_b32 v41, s16, 52 +; SI-NEXT: v_writelane_b32 v41, s17, 53 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v41, s16, 62 +; SI-NEXT: v_writelane_b32 v41, s17, 63 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v41, s16, 60 +; SI-NEXT: v_writelane_b32 v41, s17, 61 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 8 +; SI-NEXT: s_add_i32 s8, s8, 0x30000 +; SI-NEXT: v_writelane_b32 v41, s16, 58 +; SI-NEXT: v_writelane_b32 v41, s17, 59 +; SI-NEXT: 
s_lshr_b64 s[16:17], s[8:9], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 4 +; SI-NEXT: v_writelane_b32 v43, s17, 5 +; SI-NEXT: s_lshr_b64 s[16:17], s[8:9], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 2 +; SI-NEXT: v_writelane_b32 v43, s17, 3 +; SI-NEXT: s_lshr_b64 s[16:17], s[8:9], 8 +; SI-NEXT: s_add_i32 s6, s6, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 0 +; SI-NEXT: v_writelane_b32 v43, s17, 1 +; SI-NEXT: s_lshr_b64 s[16:17], s[6:7], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 10 +; SI-NEXT: v_writelane_b32 v43, s17, 11 +; SI-NEXT: s_lshr_b64 s[16:17], s[6:7], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 8 +; SI-NEXT: v_writelane_b32 v43, s17, 9 +; SI-NEXT: s_lshr_b64 s[16:17], s[6:7], 8 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: v_writelane_b32 v43, s16, 6 +; SI-NEXT: v_writelane_b32 v43, s17, 7 +; SI-NEXT: s_lshr_b64 s[16:17], s[4:5], 24 +; SI-NEXT: v_writelane_b32 v43, s16, 16 +; SI-NEXT: v_writelane_b32 v43, s17, 17 +; SI-NEXT: s_lshr_b64 s[16:17], s[4:5], 16 +; SI-NEXT: v_writelane_b32 v43, s16, 14 +; SI-NEXT: v_writelane_b32 v43, s17, 15 +; SI-NEXT: s_lshr_b64 s[16:17], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[60:61], 8 +; SI-NEXT: s_lshr_b64 s[70:71], s[44:45], 24 +; SI-NEXT: s_lshr_b64 s[80:81], s[44:45], 16 +; SI-NEXT: s_lshr_b64 s[82:83], s[44:45], 8 +; SI-NEXT: s_lshr_b64 s[64:65], s[74:75], 24 +; SI-NEXT: s_lshr_b64 s[66:67], s[74:75], 16 +; SI-NEXT: s_lshr_b64 s[68:69], s[74:75], 8 +; SI-NEXT: s_lshr_b64 s[50:51], s[72:73], 24 +; SI-NEXT: s_lshr_b64 s[52:53], s[72:73], 16 +; SI-NEXT: s_lshr_b64 s[54:55], s[72:73], 8 +; SI-NEXT: s_lshr_b64 s[36:37], s[62:63], 24 +; SI-NEXT: s_lshr_b64 s[38:39], s[62:63], 16 +; SI-NEXT: s_lshr_b64 s[48:49], s[62:63], 8 +; SI-NEXT: s_lshr_b64 s[94:95], s[58:59], 24 +; SI-NEXT: s_lshr_b64 s[30:31], s[58:59], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[58:59], 8 +; SI-NEXT: s_lshr_b64 s[90:91], s[46:47], 8 +; SI-NEXT: v_writelane_b32 v43, s16, 12 +; SI-NEXT: v_writelane_b32 v43, s17, 13 +; SI-NEXT: .LBB99_3: ; %end +; 
SI-NEXT: s_lshl_b32 s17, s92, 8 +; SI-NEXT: s_and_b32 s18, s60, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v41, 20 +; SI-NEXT: v_readlane_b32 s19, v41, 21 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: v_readlane_b32 s20, v41, 22 +; SI-NEXT: s_lshl_b32 s19, s20, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 20 +; SI-NEXT: v_mov_b32_e32 v1, s17 +; SI-NEXT: s_and_b32 s17, s61, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 19 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 18 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v2, s17 +; SI-NEXT: v_readlane_b32 s16, v41, 24 +; SI-NEXT: v_readlane_b32 s17, v41, 25 +; SI-NEXT: s_lshl_b32 s17, s16, 8 +; SI-NEXT: s_and_b32 s18, s56, 0xff +; SI-NEXT: v_readlane_b32 s21, v41, 23 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v41, 26 +; SI-NEXT: v_readlane_b32 s19, v41, 27 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: v_readlane_b32 s20, v41, 28 +; SI-NEXT: s_lshl_b32 s19, s20, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 23 +; SI-NEXT: v_mov_b32_e32 v3, s17 +; SI-NEXT: s_and_b32 s17, s57, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 22 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 21 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 
+; SI-NEXT: v_mov_b32_e32 v4, s17 +; SI-NEXT: s_lshl_b32 s17, s82, 8 +; SI-NEXT: s_and_b32 s18, s44, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s18, s80, 0xff +; SI-NEXT: s_lshl_b32 s19, s70, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 26 +; SI-NEXT: v_mov_b32_e32 v5, s17 +; SI-NEXT: s_and_b32 s17, s45, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 25 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v6, s17 +; SI-NEXT: s_lshl_b32 s17, s68, 8 +; SI-NEXT: s_and_b32 s18, s74, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s18, s66, 0xff +; SI-NEXT: s_lshl_b32 s19, s64, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 29 +; SI-NEXT: v_mov_b32_e32 v7, s17 +; SI-NEXT: s_and_b32 s17, s75, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 28 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 27 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v8, s17 +; SI-NEXT: s_lshl_b32 s17, s54, 8 +; SI-NEXT: s_and_b32 s18, s72, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s18, s52, 0xff +; SI-NEXT: s_lshl_b32 s19, s50, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff 
+; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 32 +; SI-NEXT: v_mov_b32_e32 v9, s17 +; SI-NEXT: s_and_b32 s17, s73, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 31 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 30 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v10, s17 +; SI-NEXT: s_lshl_b32 s17, s48, 8 +; SI-NEXT: s_and_b32 s18, s62, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s18, s38, 0xff +; SI-NEXT: s_lshl_b32 s19, s36, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 35 +; SI-NEXT: v_mov_b32_e32 v11, s17 +; SI-NEXT: s_and_b32 s17, s63, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 34 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 33 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v12, s17 +; SI-NEXT: s_lshl_b32 s17, s34, 8 +; SI-NEXT: s_and_b32 s18, s58, 0xff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s18, s30, 0xff +; SI-NEXT: s_lshl_b32 s19, s94, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 38 +; SI-NEXT: v_mov_b32_e32 v13, s17 +; SI-NEXT: s_and_b32 s17, s59, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 37 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 
s16, v43, 36 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_mov_b32_e32 v14, s17 +; SI-NEXT: s_lshl_b32 s17, s90, 8 +; SI-NEXT: s_and_b32 s18, s46, 0xff +; SI-NEXT: v_readlane_b32 s21, v41, 29 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v41, 30 +; SI-NEXT: v_readlane_b32 s19, v41, 31 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: v_readlane_b32 s20, v41, 32 +; SI-NEXT: s_lshl_b32 s19, s20, 24 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: v_readlane_b32 s16, v43, 41 +; SI-NEXT: v_mov_b32_e32 v15, s17 +; SI-NEXT: s_and_b32 s17, s47, 0xff +; SI-NEXT: s_lshl_b32 s18, s16, 8 +; SI-NEXT: v_readlane_b32 s16, v43, 40 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: s_and_b32 s18, s16, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 39 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s16, 24 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: s_and_b32 s17, s17, 0xffff +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xff, v10 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s35, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s39, 24 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_lshl_b32 s5, s92, 8 -; SI-NEXT: v_readlane_b32 s39, v63, 7 -; SI-NEXT: v_readlane_b32 s35, v63, 3 -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], 
s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v10, v7 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s17, s17, s18 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: v_mov_b32_e32 v16, s17 +; SI-NEXT: v_readlane_b32 s16, v41, 34 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: v_readlane_b32 s17, v41, 35 +; SI-NEXT: v_readlane_b32 s18, v41, 36 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: s_and_b32 s17, s42, 0xff +; SI-NEXT: v_readlane_b32 s19, v41, 37 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v41, 38 +; SI-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: buffer_store_dword v7, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: buffer_store_dword v8, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v9, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: v_readlane_b32 s17, v43, 44 +; SI-NEXT: buffer_store_dword v10, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: 
v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: buffer_store_dword v11, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v43, 43 +; SI-NEXT: buffer_store_dword v12, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 42 +; SI-NEXT: v_readlane_b32 s19, v41, 39 +; SI-NEXT: buffer_store_dword v13, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: buffer_store_dword v14, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readlane_b32 s18, v41, 40 +; SI-NEXT: buffer_store_dword v15, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: v_readlane_b32 s19, v41, 41 +; SI-NEXT: buffer_store_dword v16, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_lshl_b32 s17, s18, 8 +; SI-NEXT: v_readlane_b32 s18, v41, 42 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xff +; SI-NEXT: v_readlane_b32 s19, v41, 43 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v41, 44 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v43, 47 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 
0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v57 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v47 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s30, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v45 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s13, s36, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s13, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_readlane_b32 s17, v43, 46 +; SI-NEXT: s_and_b32 s17, s17, 0xff +; SI-NEXT: v_readlane_b32 s18, v43, 45 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v44 -; SI-NEXT: s_and_b32 s4, s12, 0xff -; SI-NEXT: s_lshl_b32 s5, s89, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v43 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s93, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: v_readlane_b32 s16, v41, 46 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s17, v41, 47 +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: v_readlane_b32 s19, v41, 45 +; SI-NEXT: 
s_or_b32 s14, s14, s16 +; SI-NEXT: v_readlane_b32 s16, v41, 48 +; SI-NEXT: v_readlane_b32 s17, v41, 49 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: v_readlane_b32 s18, v41, 50 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s18, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v41 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s31, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: v_readlane_b32 s15, v43, 50 +; SI-NEXT: s_lshl_b32 s15, s15, 8 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: v_readlane_b32 s15, v43, 49 +; SI-NEXT: s_and_b32 s15, s15, 0xff +; SI-NEXT: v_readlane_b32 s16, v43, 48 +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_lshl_b32 s16, s16, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: v_readlane_b32 s5, v62, 1 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v13 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: v_readlane_b32 s14, v41, 52 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s15, v41, 53 +; SI-NEXT: s_lshl_b32 s14, s14, 8 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: v_readlane_b32 s14, v41, 54 +; 
SI-NEXT: v_readlane_b32 s15, v41, 55 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s16, v41, 56 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s16, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v24 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v22 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s90, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v21 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s11, s94, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s11, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 3 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: v_readlane_b32 s13, v43, 53 +; SI-NEXT: s_lshl_b32 s13, s13, 8 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_readlane_b32 s13, v43, 52 +; SI-NEXT: s_and_b32 s13, s13, 0xff +; SI-NEXT: v_readlane_b32 s14, v43, 51 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; 
SI-NEXT: v_and_b32_e32 v3, 0xff, v9 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: v_readlane_b32 s12, v41, 58 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s13, v41, 59 +; SI-NEXT: s_lshl_b32 s12, s12, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: v_readlane_b32 s12, v41, 60 +; SI-NEXT: v_readlane_b32 s13, v41, 61 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: v_readlane_b32 s14, v41, 62 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s14, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 2 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v18 -; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s10, s91, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s10, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 6 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: v_readlane_b32 s11, v43, 56 +; SI-NEXT: s_lshl_b32 s11, s11, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: v_readlane_b32 s11, v43, 55 +; SI-NEXT: s_and_b32 s11, s11, 0xff +; SI-NEXT: v_readlane_b32 s12, v43, 54 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s12, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: 
buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v6 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: v_readlane_b32 s10, v43, 0 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s11, v43, 1 +; SI-NEXT: s_lshl_b32 s10, s10, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: v_readlane_b32 s10, v43, 2 +; SI-NEXT: v_readlane_b32 s11, v43, 3 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s12, v43, 4 +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s12, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v61 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v60 -; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: v_readlane_b32 s9, v62, 4 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v59 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s9, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 9 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: v_readlane_b32 s9, v43, 59 +; SI-NEXT: s_lshl_b32 s9, s9, 8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_readlane_b32 s9, v43, 58 +; SI-NEXT: s_and_b32 s9, s9, 0xff +; 
SI-NEXT: v_readlane_b32 s10, v43, 57 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s10, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v58 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v56 -; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: v_readlane_b32 s8, v62, 7 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_readlane_b32 s8, v43, 6 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: v_readlane_b32 s9, v43, 7 +; SI-NEXT: s_lshl_b32 s8, s8, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: v_readlane_b32 s8, v43, 8 +; SI-NEXT: v_readlane_b32 s9, v43, 9 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: v_readlane_b32 s10, v43, 10 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s10, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v46 -; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: v_readlane_b32 s7, v43, 62 +; SI-NEXT: s_lshl_b32 s7, s7, 8 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_readlane_b32 s7, v43, 61 +; SI-NEXT: s_and_b32 s7, s7, 0xff +; SI-NEXT: v_readlane_b32 s8, v43, 60 +; SI-NEXT: s_lshl_b32 s7, s7, 16 ; SI-NEXT: s_lshl_b32 s8, s8, 24 -; SI-NEXT: v_and_b32_e32 v3, 
0xffff, v3 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 12 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v42 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 11 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v40 -; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: v_readlane_b32 s7, v62, 10 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v55 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s7, s7, 24 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: v_readlane_b32 s6, v43, 12 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: v_readlane_b32 s7, v43, 13 +; SI-NEXT: s_lshl_b32 s6, s6, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: v_readlane_b32 s6, v43, 14 +; SI-NEXT: v_readlane_b32 s7, v43, 15 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: v_readlane_b32 s8, v43, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s8, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s7, s5 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 15 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: 
s_or_b32 s6, s7, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x74, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s6, 0xff +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: v_readlane_b32 s5, v42, 1 ; SI-NEXT: s_lshl_b32 s5, s5, 8 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v54 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_readlane_b32 s5, v62, 14 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v53 +; SI-NEXT: v_readlane_b32 s5, v42, 0 ; SI-NEXT: s_and_b32 s5, s5, 0xff -; SI-NEXT: v_readlane_b32 s6, v62, 13 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v52 +; SI-NEXT: v_readlane_b32 s6, v43, 63 ; SI-NEXT: s_lshl_b32 s5, s5, 16 ; SI-NEXT: s_lshl_b32 s6, s6, 24 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x78, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_readlane_b32 s21, v41, 33 +; SI-NEXT: v_readlane_b32 s19, v41, 51 +; SI-NEXT: v_readlane_b32 s17, v41, 57 +; SI-NEXT: v_readlane_b32 s15, v41, 63 +; SI-NEXT: v_readlane_b32 s13, v43, 5 +; SI-NEXT: v_readlane_b32 s11, v43, 11 +; SI-NEXT: v_readlane_b32 s9, v43, 17 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:88 
; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: v_readlane_b32 s36, v63, 4 -; SI-NEXT: v_readlane_b32 s31, v63, 1 -; SI-NEXT: v_readlane_b32 s30, v63, 0 +; SI-NEXT: v_readlane_b32 s99, v40, 35 +; SI-NEXT: v_readlane_b32 s98, v40, 34 +; SI-NEXT: v_readlane_b32 s97, v40, 33 +; SI-NEXT: v_readlane_b32 s96, v40, 32 +; SI-NEXT: v_readlane_b32 s87, v40, 31 +; SI-NEXT: v_readlane_b32 s86, v40, 30 +; SI-NEXT: v_readlane_b32 s85, v40, 29 +; SI-NEXT: v_readlane_b32 s84, v40, 28 +; SI-NEXT: v_readlane_b32 s83, v40, 27 +; SI-NEXT: v_readlane_b32 s82, v40, 26 +; SI-NEXT: v_readlane_b32 s81, v40, 25 +; SI-NEXT: v_readlane_b32 s80, v40, 24 +; SI-NEXT: v_readlane_b32 s71, v40, 23 +; SI-NEXT: v_readlane_b32 s70, v40, 22 +; SI-NEXT: v_readlane_b32 s69, v40, 21 +; SI-NEXT: v_readlane_b32 s68, v40, 20 +; SI-NEXT: v_readlane_b32 s67, v40, 19 +; SI-NEXT: v_readlane_b32 s66, v40, 18 +; SI-NEXT: v_readlane_b32 s65, v40, 17 +; SI-NEXT: v_readlane_b32 s64, v40, 16 +; SI-NEXT: v_readlane_b32 
s55, v40, 15 +; SI-NEXT: v_readlane_b32 s54, v40, 14 +; SI-NEXT: v_readlane_b32 s53, v40, 13 +; SI-NEXT: v_readlane_b32 s52, v40, 12 +; SI-NEXT: v_readlane_b32 s51, v40, 11 +; SI-NEXT: v_readlane_b32 s50, v40, 10 +; SI-NEXT: v_readlane_b32 s49, v40, 9 +; SI-NEXT: v_readlane_b32 s48, v40, 8 +; SI-NEXT: v_readlane_b32 s39, v40, 7 +; SI-NEXT: v_readlane_b32 s38, v40, 6 +; SI-NEXT: v_readlane_b32 s37, v40, 5 +; SI-NEXT: v_readlane_b32 s36, v40, 4 +; SI-NEXT: v_readlane_b32 s35, v40, 3 +; SI-NEXT: v_readlane_b32 s34, v40, 2 +; SI-NEXT: v_readlane_b32 s31, v40, 1 +; SI-NEXT: v_readlane_b32 s30, v40, 0 ; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB99_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vcc_lo -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; kill: killed $vcc_lo -; SI-NEXT: ; implicit-def: $vcc_lo +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: v_writelane_b32 v41, s4, 20 +; SI-NEXT: v_writelane_b32 v41, s5, 21 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr92 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr57 -; 
SI-NEXT: ; implicit-def: $sgpr68 -; SI-NEXT: ; implicit-def: $sgpr71 -; SI-NEXT: ; implicit-def: $sgpr83 -; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $sgpr65 -; SI-NEXT: ; implicit-def: $sgpr69 -; SI-NEXT: ; implicit-def: $sgpr80 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $sgpr54 -; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr80 ; SI-NEXT: ; implicit-def: $sgpr70 -; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $sgpr51 -; SI-NEXT: ; implicit-def: $sgpr55 -; SI-NEXT: ; implicit-def: $sgpr67 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $sgpr48 -; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr66 ; SI-NEXT: ; implicit-def: $sgpr64 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr37 -; SI-NEXT: ; implicit-def: $sgpr49 -; SI-NEXT: ; implicit-def: $sgpr53 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $sgpr34 -; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr52 ; SI-NEXT: ; implicit-def: $sgpr50 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr95 -; SI-NEXT: ; implicit-def: $sgpr35 -; SI-NEXT: ; implicit-def: $sgpr39 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr92 -; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: 
$sgpr48 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr36 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr89 -; SI-NEXT: ; implicit-def: $sgpr93 -; SI-NEXT: ; implicit-def: $sgpr31 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr91 -; SI-NEXT: ; implicit-def: $sgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; kill: killed $vcc_lo -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: 
$vcc_lo -; SI-NEXT: ; kill: killed $vcc_lo -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; kill: killed $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: 
killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: v_writelane_b32 v41, s4, 22 +; SI-NEXT: v_writelane_b32 v41, s5, 23 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 24 +; SI-NEXT: v_writelane_b32 v41, s5, 25 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 26 +; SI-NEXT: v_writelane_b32 v41, s5, 27 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 28 +; SI-NEXT: v_writelane_b32 v41, s5, 29 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; 
SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; kill: killed $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 30 +; SI-NEXT: v_writelane_b32 v41, s5, 31 +; SI-NEXT: ; 
implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 32 +; SI-NEXT: v_writelane_b32 v41, s5, 33 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 34 +; SI-NEXT: v_writelane_b32 v41, s5, 35 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 36 +; SI-NEXT: v_writelane_b32 v41, s5, 37 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 38 +; SI-NEXT: v_writelane_b32 v41, s5, 39 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 40 +; SI-NEXT: v_writelane_b32 v41, s5, 41 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 42 +; SI-NEXT: v_writelane_b32 v41, s5, 43 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 44 +; SI-NEXT: v_writelane_b32 v41, s5, 45 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 46 +; SI-NEXT: v_writelane_b32 v41, s5, 47 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 48 +; SI-NEXT: v_writelane_b32 v41, s5, 49 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 50 +; SI-NEXT: v_writelane_b32 v41, s5, 51 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 52 +; SI-NEXT: v_writelane_b32 v41, s5, 53 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 54 +; SI-NEXT: v_writelane_b32 v41, s5, 55 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 56 +; SI-NEXT: v_writelane_b32 v41, s5, 57 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 58 +; SI-NEXT: v_writelane_b32 v41, s5, 59 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 60 +; SI-NEXT: v_writelane_b32 v41, s5, 61 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v41, s4, 62 +; SI-NEXT: v_writelane_b32 v41, s5, 63 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v43, s4, 0 +; SI-NEXT: v_writelane_b32 v43, s5, 1 +; SI-NEXT: ; implicit-def: 
$sgpr4 +; SI-NEXT: v_writelane_b32 v43, s4, 2 +; SI-NEXT: v_writelane_b32 v43, s5, 3 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s4, 4 +; SI-NEXT: v_writelane_b32 v43, s5, 5 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s4, 6 +; SI-NEXT: v_writelane_b32 v43, s5, 7 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s4, 8 +; SI-NEXT: v_writelane_b32 v43, s5, 9 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s4, 10 +; SI-NEXT: v_writelane_b32 v43, s5, 11 +; SI-NEXT: v_writelane_b32 v43, s16, 12 +; SI-NEXT: v_writelane_b32 v43, s17, 13 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v43, s16, 14 +; SI-NEXT: v_writelane_b32 v43, s17, 15 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: v_writelane_b32 v43, s16, 16 +; SI-NEXT: v_writelane_b32 v43, s17, 17 ; SI-NEXT: s_branch .LBB99_2 ; ; VI-LABEL: bitcast_v64i16_to_v128i8_scalar: @@ -227209,7 +229205,6 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a ; SI-LABEL: bitcast_v64bf16_to_v64i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v50, v22 ; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill @@ -227226,1018 +229221,1096 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:156 ; 
4-byte Folded Spill -; SI-NEXT: v_mul_f32_e32 v58, 1.0, v1 -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v2 -; SI-NEXT: v_mul_f32_e32 v2, 1.0, v50 ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80 ; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 
offset:32 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:68 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:76 +; SI-NEXT: v_mul_f32_e32 v38, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v4 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v62, v28 -; SI-NEXT: v_mul_f32_e32 v61, 1.0, v10 -; SI-NEXT: v_mul_f32_e32 v10, 1.0, v62 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v5 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mul_f32_e32 v1, 1.0, v3 -; SI-NEXT: v_mul_f32_e32 v22, 1.0, v4 -; SI-NEXT: v_mul_f32_e32 v60, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v59, 1.0, v6 -; SI-NEXT: v_mul_f32_e32 v29, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v28, 1.0, v8 -; SI-NEXT: v_mul_f32_e32 v63, 1.0, v9 -; SI-NEXT: v_mul_f32_e32 v37, 1.0, v11 -; SI-NEXT: v_mul_f32_e32 v11, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v8 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v12 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:328 ; 
4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v20 +; SI-NEXT: v_mul_f32_e32 v59, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v4, 1.0, v6 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v8, 1.0, v9 +; SI-NEXT: v_mul_f32_e32 v6, 1.0, v10 +; SI-NEXT: v_mul_f32_e32 v45, 1.0, v11 ; SI-NEXT: v_mul_f32_e32 v12, 1.0, v13 -; SI-NEXT: v_mul_f32_e32 v30, 1.0, v14 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v15 -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v17 -; SI-NEXT: v_mul_f32_e32 v33, 1.0, v18 -; SI-NEXT: v_mul_f32_e32 v15, 1.0, v20 -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v21 -; SI-NEXT: v_mul_f32_e32 v14, 1.0, v23 -; SI-NEXT: v_mul_f32_e32 v21, 1.0, v24 -; SI-NEXT: v_mul_f32_e32 v50, 1.0, v25 -; SI-NEXT: v_mul_f32_e32 v25, 1.0, v26 -; SI-NEXT: v_mul_f32_e32 v13, 1.0, v27 -; SI-NEXT: v_mul_f32_e64 v4, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v3, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v18, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v5, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v20, 1.0, s26 +; SI-NEXT: v_mul_f32_e32 v10, 1.0, v14 +; SI-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_mul_f32_e32 v14, 1.0, v18 +; SI-NEXT: v_mul_f32_e32 v13, 1.0, v19 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill +; SI-NEXT: v_mul_f32_e32 v18, 1.0, v21 +; SI-NEXT: v_mul_f32_e32 v16, 1.0, v22 +; SI-NEXT: v_mul_f32_e32 v19, 1.0, v23 +; SI-NEXT: v_mul_f32_e32 v23, 1.0, v25 +; SI-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; SI-NEXT: v_mul_f32_e32 v63, 1.0, v28 +; SI-NEXT: v_mul_f32_e32 v7, 1.0, v29 +; SI-NEXT: v_mul_f32_e32 v11, 1.0, v30 +; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v30, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v22, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v20, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v29, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v28, 1.0, s22 +; 
SI-NEXT: v_mul_f32_e64 v9, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v21, 1.0, s28 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 -; SI-NEXT: v_mul_f32_e32 v31, 1.0, v16 -; SI-NEXT: v_mul_f32_e32 v16, 1.0, v19 -; SI-NEXT: v_mul_f32_e32 v9, 1.0, v32 -; SI-NEXT: v_mul_f32_e32 v27, 1.0, v35 +; SI-NEXT: v_mul_f32_e32 v31, 1.0, v17 +; SI-NEXT: v_mul_f32_e32 v17, 1.0, v24 +; SI-NEXT: v_mul_f32_e32 v24, 1.0, v26 +; SI-NEXT: v_mul_f32_e32 v62, 1.0, v32 +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v33 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mul_f32_e32 v35, 1.0, v36 +; SI-NEXT: v_mul_f32_e32 v53, 1.0, v34 +; SI-NEXT: v_mul_f32_e32 v35, 1.0, v35 +; SI-NEXT: v_mul_f32_e32 v25, 1.0, v36 +; SI-NEXT: v_mul_f32_e32 v60, 1.0, v37 +; SI-NEXT: v_mul_f32_e32 v37, 1.0, v39 ; SI-NEXT: v_mul_f32_e32 v36, 1.0, v48 -; SI-NEXT: v_mul_f32_e32 v8, 1.0, v49 -; SI-NEXT: v_mul_f32_e32 v26, 1.0, v51 -; SI-NEXT: v_mul_f32_e32 v49, 1.0, v52 -; SI-NEXT: v_mul_f32_e32 v48, 1.0, v53 -; SI-NEXT: v_mul_f32_e32 v24, 1.0, v54 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v49 +; SI-NEXT: v_mul_f32_e32 v56, 1.0, v51 +; SI-NEXT: v_mul_f32_e32 v5, 1.0, v52 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v54 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v55 +; SI-NEXT: v_mul_f32_e32 v51, 1.0, v55 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v40 +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v40 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_mul_f32_e32 v54, 1.0, v41 +; SI-NEXT: v_mul_f32_e32 v42, 1.0, v41 ; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_mul_f32_e32 v7, 1.0, v42 +; SI-NEXT: v_mul_f32_e32 v48, 1.0, v43 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_mul_f32_e32 v6, 1.0, v43 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v44 ; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_mul_f32_e32 v41, 1.0, v44 +; SI-NEXT: v_mul_f32_e32 v47, 1.0, v46 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_mul_f32_e32 v40, 1.0, v45 +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v57 ; SI-NEXT: 
s_waitcnt vmcnt(6) -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v46 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_mul_f32_e32 v47, 1.0, v47 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_mul_f32_e32 v62, 1.0, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_mul_f32_e32 v42, 1.0, v56 -; SI-NEXT: v_mul_f32_e32 v55, 1.0, v57 -; SI-NEXT: v_mul_f32_e64 v46, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v56, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v45, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v44, 1.0, s24 -; SI-NEXT: v_mul_f32_e64 v43, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v19, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v52, 1.0, s28 +; SI-NEXT: v_mul_f32_e32 v58, 1.0, v58 +; SI-NEXT: v_mul_f32_e64 v54, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v26, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v52, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v34, 1.0, s25 +; SI-NEXT: v_mul_f32_e64 v33, 1.0, s26 ; SI-NEXT: v_mul_f32_e64 v32, 1.0, s29 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_mul_f32_e32 v17, 1.0, v2 -; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16 -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v17, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v29, 
off, s[0:3], s32 offset:252 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 
offset:396 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:488 ; 4-byte 
Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB105_2 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: s_waitcnt expcnt(1) ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v46 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v20 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v54 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v26 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v52 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v21 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v3 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v50 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; 
SI-NEXT: v_lshrrev_b32_e32 v2, 16, v45 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v15 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v29 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v14 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v13 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v56 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v37 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v19 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v44 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v24 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_lshrrev_b32_e32 v1, 16, v11 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v27 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v52 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v52, v38 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v11 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v38 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v32 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v32, v31 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v62 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v31 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v39 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 -; SI-NEXT: v_mov_b32_e32 v14, v21 -; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v35 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v13 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v25 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v9 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v36 
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v34 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v57, v8 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v24 -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v39 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v22 -; SI-NEXT: v_mov_b32_e32 v22, v11 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v33 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v16 -; SI-NEXT: v_mov_b32_e32 v16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v51 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v27 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v48 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_mov_b32_e32 v25, v1 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v58 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v30 +; SI-NEXT: 
buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v34 +; SI-NEXT: v_mov_b32_e32 v57, v13 +; SI-NEXT: v_mov_b32_e32 v40, v3 +; SI-NEXT: v_mov_b32_e32 v54, v50 +; SI-NEXT: v_mov_b32_e32 v46, v19 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v9 +; SI-NEXT: v_mov_b32_e32 v44, v15 +; SI-NEXT: v_mov_b32_e32 v9, v11 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v6 ; SI-NEXT: s_mov_b64 s[4:5], 0 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v43 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v58 -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v60 ; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v59 -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v63 -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v61 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 -; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v28, v13 -; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v62 -; SI-NEXT: v_mov_b32_e32 v46, v27 -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v35 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v36 -; SI-NEXT: v_mov_b32_e32 v45, v26 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v48 -; SI-NEXT: v_mov_b32_e32 v44, v24 -; SI-NEXT: v_mov_b32_e32 v43, v23 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v53 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v54 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: v_mov_b32_e32 v7, v6 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v10 +; SI-NEXT: v_mov_b32_e32 v41, v27 +; SI-NEXT: v_mov_b32_e32 v52, v62 +; SI-NEXT: v_mov_b32_e32 v21, v58 +; SI-NEXT: v_mov_b32_e32 v58, v20 +; SI-NEXT: 
v_lshrrev_b32_e32 v39, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v38 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v60 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v56 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v5 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v55 +; SI-NEXT: v_mov_b32_e32 v55, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v42 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_mov_b32_e32 v53, v5 +; SI-NEXT: v_mov_b32_e32 v42, v43 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v13 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v3 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: 
v_lshrrev_b32_e32 v48, 16, v50 +; SI-NEXT: s_waitcnt vmcnt(2) expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v17 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v11 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v15 +; SI-NEXT: v_mov_b32_e32 v5, v19 +; SI-NEXT: v_mov_b32_e32 v7, v15 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v63 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v61 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v47 +; SI-NEXT: v_mov_b32_e32 v47, v3 +; SI-NEXT: v_mov_b32_e32 v3, v17 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v41 -; SI-NEXT: v_mov_b32_e32 v56, v51 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v42 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v55 -; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_mov_b32_e32 v29, v2 -; SI-NEXT: v_mov_b32_e32 v2, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v50 
-; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v26 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v49 -; SI-NEXT: v_mov_b32_e32 v23, v17 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v40 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v51 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v47 -; SI-NEXT: v_mov_b32_e32 v47, v34 -; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v38 -; SI-NEXT: v_mov_b32_e32 v34, v38 -; SI-NEXT: v_mov_b32_e32 v38, v8 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v43 +; SI-NEXT: v_mov_b32_e32 v1, v13 ; SI-NEXT: s_branch .LBB105_3 ; SI-NEXT: .LBB105_2: -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_mov_b32_e32 v2, v37 -; SI-NEXT: v_mov_b32_e32 v37, v34 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_mov_b32_e32 v25, v1 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v21, v58 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v52, v62 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: ; implicit-def: 
$vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr24 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr16 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; 
implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; kill: killed $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr22 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: v_mov_b32_e32 v7, v6 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr6 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; kill: killed $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; kill: killed $vgpr1 +; SI-NEXT: 
; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; kill: killed $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: v_mov_b32_e32 v52, v38 -; SI-NEXT: v_mov_b32_e32 v22, v11 -; SI-NEXT: v_mov_b32_e32 v32, v31 -; SI-NEXT: v_mov_b32_e32 v16, v15 -; SI-NEXT: v_mov_b32_e32 v14, v21 -; SI-NEXT: v_mov_b32_e32 v28, v13 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v57, v8 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v54, v50 +; SI-NEXT: v_mov_b32_e32 v56, v47 +; SI-NEXT: v_mov_b32_e32 v9, v11 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v53, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mov_b32_e32 v46, v27 -; SI-NEXT: v_mov_b32_e32 v45, v26 -; SI-NEXT: v_mov_b32_e32 v44, v24 -; SI-NEXT: v_mov_b32_e32 v43, v23 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v56, v51 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v40, v3 +; SI-NEXT: v_mov_b32_e32 v44, v15 +; SI-NEXT: v_mov_b32_e32 v57, v13 +; SI-NEXT: v_mov_b32_e32 v46, v19 +; SI-NEXT: v_mov_b32_e32 v41, v27 ; SI-NEXT: s_mov_b64 s[4:5], -1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; kill: killed $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; 
implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; kill: killed $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; kill: killed $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; kill: killed $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: v_mov_b32_e32 v42, v43 +; SI-NEXT: v_mov_b32_e32 v3, v17 +; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: .LBB105_3: ; %Flow -; SI-NEXT: v_mov_b32_e32 v51, v7 -; SI-NEXT: v_mov_b32_e32 v24, v10 -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:256 ; 
4-byte Folded Reload -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload ; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] ; SI-NEXT: s_cbranch_vccnz .LBB105_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: v_mov_b32_e32 v9, v2 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v30 +; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v31 +; SI-NEXT: v_mov_b32_e32 v38, v9 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v38 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v38, 0xffff0000, v31 +; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 
offset:356 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(9) ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v3 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_alignbit_b32 v2, v4, v2, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v29 +; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v30 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; SI-NEXT: s_waitcnt vmcnt(8) +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:292 ; 4-byte Folded 
Spill +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18 +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v28 +; SI-NEXT: s_waitcnt vmcnt(6) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_alignbit_b32 v2, v4, v2, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v34 +; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v29 +; SI-NEXT: v_and_b32_e32 v34, 0xffff0000, v30 +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_alignbit_b32 v2, v4, v2, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v32 +; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v27 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v2, 
0xffff0000, v2 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_alignbit_b32 v2, v4, v2, 16 +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v1 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v40 +; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v4 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v26 +; SI-NEXT: v_alignbit_b32 v2, v4, v2, 16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v11 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v54 +; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; SI-NEXT: v_alignbit_b32 v1, v4, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v19 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v7 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v15 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 -; SI-NEXT: v_alignbit_b32 v1, v5, v1, 
16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v50 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v44 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v5 -; SI-NEXT: v_alignbit_b32 v1, v6, v1, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v7 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v16 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v5 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v57 +; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_alignbit_b32 v1, v7, v1, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v17 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_and_b32_e32 v1, 0xffff0000, v10 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v46 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v7 -; SI-NEXT: v_alignbit_b32 v1, v8, v1, 16 -; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v22 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v13 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v63 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v41 +; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v8 -; SI-NEXT: v_alignbit_b32 v1, v9, v1, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v32 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v11 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v61 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v52 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v9 -; SI-NEXT: v_alignbit_b32 v1, v10, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v9 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload -; 
SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v16 -; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v7 -; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v60 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v7 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 ; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v38, v11, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v14 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v11 -; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v52, v12, v1, 16 -; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v28 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 -; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v12 -; SI-NEXT: v_alignbit_b32 v1, v13, v1, 16 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v46 -; SI-NEXT: buffer_store_dword v1, off, 
s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v24 -; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v25 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v13 -; SI-NEXT: v_alignbit_b32 v1, v14, v1, 16 -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v45 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24 +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v5 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v57 -; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_alignbit_b32 v50, v15, v1, 16 -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v43 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v44 -; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: s_waitcnt 
vmcnt(0) +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v15 -; SI-NEXT: v_alignbit_b32 v1, v16, v1, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v3 +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v51 -; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v16 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v12 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 -; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 -; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 -; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v56 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v1, v18, v1, 16 -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v17 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v21 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v56 -; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_alignbit_b32 v39, v19, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, 
s[0:3], s32 offset:380 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v55 -; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v17 -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v41, v42, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v40, v41, v17, 16 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 -; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v17, v39 -; SI-NEXT: v_mov_b32_e32 v39, v52 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v48, v49, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 -; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: 
v_and_b32_e32 v1, 0xffff0000, v42 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v53, v54, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 -; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_alignbit_b32 v2, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v26, v36, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; SI-NEXT: v_alignbit_b32 v4, v6, v4, 16 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v53 +; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_alignbit_b32 v6, v8, v6, 16 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 -; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v21 +; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21 +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill ; 
SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v27, v35, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 +; SI-NEXT: v_alignbit_b32 v8, v10, v8, 16 +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_alignbit_b32 v10, v12, v10, 16 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 +; SI-NEXT: v_alignbit_b32 v12, v14, v12, 16 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v23 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_alignbit_b32 v14, v18, v14, 16 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) ; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 ; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v18 -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v37 -; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; SI-NEXT: v_and_b32_e32 v37, 0xffff0000, v5 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v18, v20, v18, 16 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_alignbit_b32 v62, v23, v1, 16 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 
v23, 0xffff0000, v23 +; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v23 +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v47 +; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 +; SI-NEXT: v_alignbit_b32 v24, v24, v23, 16 +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v26 +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v1 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v1 -; SI-NEXT: v_alignbit_b32 v1, v30, v18, 16 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v47, v21, v12, 16 -; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v34 -; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 -; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v12 -; SI-NEXT: v_alignbit_b32 v31, v22, v11, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v34, 0xffff0000, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_alignbit_b32 v22, v21, v20, 16 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v16 +; SI-NEXT: 
buffer_load_dword v16, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload +; SI-NEXT: v_lshr_b64 v[48:49], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[7:8], 16 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 -; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; SI-NEXT: v_alignbit_b32 v12, v11, v18, 16 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v26 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v26 +; SI-NEXT: v_alignbit_b32 v26, v59, v25, 16 +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v27 +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload +; SI-NEXT: v_lshr_b64 v[51:52], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[1:2], 16 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 -; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v19 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 -; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; SI-NEXT: v_alignbit_b32 v63, v61, v18, 16 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 -; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18 -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v18 -; SI-NEXT: v_alignbit_b32 v60, v59, v7, 16 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v7 -; SI-NEXT: v_alignbit_b32 v58, v29, v6, 16 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:356 ; 
4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v6 -; SI-NEXT: v_alignbit_b32 v20, v19, v5, 16 -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_alignbit_b32 v16, v45, v16, 16 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_alignbit_b32 v18, v6, v5, 16 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload -; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x40c00000, v27 +; SI-NEXT: v_alignbit_b32 v28, v58, v27, 16 +; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v29 +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_alignbit_b32 v20, v55, v20, 16 +; SI-NEXT: 
v_lshr_b64 v[36:37], v[19:20], 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v2, v6, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v6, v2, v3, 16 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v18, v5, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v63, v33, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v12, v8, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v1, v9, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v29 +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v29 +; SI-NEXT: v_alignbit_b32 v35, v43, v32, 16 +; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v30 +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload +; SI-NEXT: v_lshr_b64 v[62:63], v[34:35], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30 +; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v30 +; SI-NEXT: v_alignbit_b32 v39, v29, v32, 16 +; SI-NEXT: buffer_store_dword v29, off, 
s[0:3], s32 offset:316 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[5:6], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[31:32], v[27:28], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v31, v10, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[23:24], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v47, v32, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[17:18], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v62, v25, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[13:14], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v27, v13, 16 -; SI-NEXT: v_alignbit_b32 v5, v20, v4, 16 -; SI-NEXT: v_alignbit_b32 v4, v58, v37, 16 -; SI-NEXT: v_alignbit_b32 v3, v60, v34, 16 -; SI-NEXT: v_mov_b32_e32 v33, v38 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 
4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[11:12], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v2, v26, v14, 16 -; SI-NEXT: v_alignbit_b32 v38, v53, v15, 16 -; SI-NEXT: v_mov_b32_e32 v15, v22 -; SI-NEXT: v_alignbit_b32 v25, v48, v16, 16 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[31:32], v[9:10], 16 +; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16 ; SI-NEXT: .LBB105_5: ; %end -; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_or_b32_e32 v6, v8, v6 -; SI-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 
offset:212 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v52 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v6, v2 -; SI-NEXT: v_add_i32_e32 v6, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v2, v6, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v2, v2, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v2, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v62 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v2, v2, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v2, v6, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; 
SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v43 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v29 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v2, v2, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v2, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v19 -; SI-NEXT: v_or_b32_e32 v2, v2, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v2, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v58 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v51 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v58 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v29 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; 
SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v25 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v60 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v24 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte 
Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v36 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v63 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v33 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v45 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v2, 
v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v48 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v30 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v33 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v15 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: 
v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v15 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v39 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v15 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v21 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; 
SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v13 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v62 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v23 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload 
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v50 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 -; 
SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v53 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v31 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: 
v_add_i32_e32 v2, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v41 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: 
buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -233541,120 +235614,138 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:76 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:52 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:56 ; SI-NEXT: 
s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:72 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v3 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:28 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v30 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:32 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:36 +; SI-NEXT: v_cvt_f16_f32_e32 v61, s28 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:60 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:68 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:76 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v5 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v2, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 
4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v9 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v2, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v62, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v49, s21 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v13 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v2, v11 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v25 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v4, s17 -; SI-NEXT: 
v_cvt_f16_f32_e32 v29, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v30, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v15 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v17 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s23 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v13 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s27 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v9 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v27, s19 ; SI-NEXT: s_waitcnt vmcnt(14) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v45 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v47 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v12, v56 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v57 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v59 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v60 -; SI-NEXT: v_cvt_f16_f32_e32 v43, v61 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v62 -; SI-NEXT: v_cvt_f16_f32_e32 v5, 
v63 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v33 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v33, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v35 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v39 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v50 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v52, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v35, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v51, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v56 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v34 -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_cvt_f16_f32_e32 v34, v37 -; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_cvt_f16_f32_e32 v21, v48 -; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_cvt_f16_f32_e32 v48, v51 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v54 -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_cvt_f16_f32_e32 v51, v55 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f16_f32_e32 v37, v40 -; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v41 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v25, v42 -; SI-NEXT: 
v_cvt_f16_f32_e32 v60, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v57, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v58, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v59, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v63, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v46, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v47, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v56, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v62, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v45, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v61, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v58 +; SI-NEXT: v_cvt_f16_f32_e32 v39, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v36, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v58, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v34, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v56, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v53 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v43 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v59 ; SI-NEXT: s_cbranch_scc0 .LBB109_2 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_mov_b64 s[4:5], 0 @@ -233663,510 +235754,600 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i ; SI-NEXT: s_mov_b64 s[4:5], -1 ; SI-NEXT: .LBB109_3: ; %Flow ; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; SI-NEXT: v_mov_b32_e32 v41, v2 -; SI-NEXT: v_mov_b32_e32 v2, v48 +; SI-NEXT: v_mov_b32_e32 v8, v3 ; SI-NEXT: s_cbranch_vccnz .LBB109_5 ; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: 
v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v39 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v62 +; SI-NEXT: v_mov_b32_e32 v47, v38 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v39, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v36 +; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v49 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v36, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v34 +; SI-NEXT: v_mov_b32_e32 v45, v35 +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v58 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v23 +; SI-NEXT: v_mov_b32_e32 v57, v33 +; SI-NEXT: v_or_b32_e32 v34, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v32 +; 
SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v53 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v56 +; SI-NEXT: v_or_b32_e32 v32, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v30 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_mov_b32_e32 v9, v31 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_lshr_b64 v[58:59], v[33:34], 16 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v44 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: 
v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_mov_b32_e32 v42, v44 -; SI-NEXT: v_or_b32_e32 v44, v1, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: v_or_b32_e32 v2, v2, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_or_b32_e32 v5, v5, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_or_b32_e32 v8, v8, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_or_b32_e32 v11, v11, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_or_b32_e32 v15, v15, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: 
v_or_b32_e32 v18, v18, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_or_b32_e32 v31, v31, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v33 -; SI-NEXT: v_or_b32_e32 v32, v32, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v36 -; SI-NEXT: v_or_b32_e32 v35, v35, v48 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v39 -; SI-NEXT: v_or_b32_e32 v38, v38, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_mov_b32_e32 v1, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v52 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_or_b32_e32 v49, v48, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v53 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_or_b32_e32 v52, v48, v51 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v30 -; SI-NEXT: v_or_b32_e32 v29, v29, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v47 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v59 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v30, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v26 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt 
vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v26, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v22 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v22, v3, v5 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v18 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v49 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v54 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v18, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v16 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; 
SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_mov_b32_e32 v54, v15 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v12 +; SI-NEXT: v_mov_b32_e32 v12, v42 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v16, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v14 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v14, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v51 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v5, v60 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v60 +; SI-NEXT: v_or_b32_e32 v43, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v9 +; SI-NEXT: 
v_cvt_f32_f16_e32 v3, v10 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v55 +; SI-NEXT: v_or_b32_e32 v10, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v48 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v41 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v48 +; SI-NEXT: v_or_b32_e32 v41, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; SI-NEXT: v_or_b32_e32 v6, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v63 +; SI-NEXT: v_lshr_b64 v[62:63], v[38:39], 16 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v47, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v47 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 -; SI-NEXT: v_or_b32_e32 v46, v48, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v57 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v61 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v45 -; SI-NEXT: v_or_b32_e32 v57, v48, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v60 -; SI-NEXT: 
v_cvt_f32_f16_e32 v51, v63 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v51 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_or_b32_e32 v60, v48, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v62 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v51 -; SI-NEXT: v_or_b32_e32 v59, v54, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v56 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v48 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_or_b32_e32 v56, v54, v48 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v41 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v45, v40, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v7 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v49 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; 
SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v51, v4 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v51 +; SI-NEXT: v_or_b32_e32 v4, v3, v4 +; SI-NEXT: v_mov_b32_e32 v63, v51 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v45 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v47 +; SI-NEXT: v_or_b32_e32 v44, v28, v33 +; SI-NEXT: v_lshr_b64 v[46:47], v[29:30], 16 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v49, v24, v35 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v11 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; SI-NEXT: v_or_b32_e32 v2, v2, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v52 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_or_b32_e32 v52, v20, v38 +; SI-NEXT: 
v_cvt_f32_f16_e32 v20, v61 +; SI-NEXT: v_or_b32_e32 v61, v24, v29 +; SI-NEXT: v_mov_b32_e32 v38, v49 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_or_b32_e32 v37, v20, v31 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v28, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v20, v12 +; SI-NEXT: v_or_b32_e32 v12, v28, v25 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v24, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; 
SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v14, v14, v13 -; SI-NEXT: v_or_b32_e32 v23, v23, v17 -; SI-NEXT: v_or_b32_e32 v34, v34, v21 -; SI-NEXT: v_alignbit_b32 v4, v57, v4, 16 -; SI-NEXT: v_alignbit_b32 v63, v46, v51, 16 -; SI-NEXT: v_alignbit_b32 v62, v29, v48, 16 -; SI-NEXT: v_alignbit_b32 v61, v52, v54, 16 -; SI-NEXT: v_alignbit_b32 v13, v32, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v21, v2, v21, 16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_or_b32_e32 v7, v41, v55 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v28, v12 +; SI-NEXT: v_or_b32_e32 v12, v20, v21 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 ; SI-NEXT: s_waitcnt 
vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_or_b32_e32 v7, v7, v40 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v20, v12 +; SI-NEXT: v_or_b32_e32 v12, v24, v17 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v7, v41, v10 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_or_b32_e32 v12, v28, v15 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v10, v35, v10, 16 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v7 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_or_b32_e32 v7, v41, v20 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v20, v31, v20, 16 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v12 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v7 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_or_b32_e32 v7, v41, v28 -; SI-NEXT: buffer_store_dword v7, off, 
s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v28, v12 +; SI-NEXT: v_or_b32_e32 v12, v20, v13 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v28, v15, v28, 16 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v41, v7 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_or_b32_e32 v7, v41, v27 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v42, v43 -; SI-NEXT: v_alignbit_b32 v27, v11, v27, 16 +; SI-NEXT: v_or_b32_e32 v12, v24, v42 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v20, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v54 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v7, v41, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v1 -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v42 -; SI-NEXT: v_mov_b32_e32 v1, v44 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_mov_b32_e32 v44, v7 -; SI-NEXT: v_or_b32_e32 v43, v42, v24 -; SI-NEXT: v_alignbit_b32 v26, v8, v26, 16 -; SI-NEXT: v_or_b32_e32 v7, v41, v37 -; SI-NEXT: v_mov_b32_e32 v51, v7 -; SI-NEXT: v_alignbit_b32 v41, v49, v55, 16 -; SI-NEXT: v_alignbit_b32 v7, v38, v40, 16 -; SI-NEXT: v_alignbit_b32 v24, v5, v24, 16 -; SI-NEXT: v_alignbit_b32 v37, v1, v37, 16 +; SI-NEXT: v_or_b32_e32 v12, v28, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; 
SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[56:57], v[31:32], 16 +; SI-NEXT: v_or_b32_e32 v54, v20, v40 +; SI-NEXT: v_or_b32_e32 v20, v24, v5 +; SI-NEXT: v_mov_b32_e32 v33, v54 +; SI-NEXT: v_mov_b32_e32 v31, v55 +; SI-NEXT: v_lshr_b64 v[54:55], v[15:16], 16 +; SI-NEXT: v_mov_b32_e32 v15, v20 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v50, v8 +; SI-NEXT: v_or_b32_e32 v8, v28, v3 +; SI-NEXT: v_lshr_b64 v[28:29], v[5:6], 16 +; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v12, v50, v1 +; SI-NEXT: v_lshr_b64 v[49:50], v[35:36], 16 +; SI-NEXT: v_mov_b32_e32 v35, v44 +; SI-NEXT: v_lshr_b64 v[44:45], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[20:21], v[42:43], 16 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[20:21], v[9:10], 16 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[20:21], v[40:41], 16 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[12:13], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[3:4], 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[20:21], v[1:2], 16 +; SI-NEXT: v_mov_b32_e32 v42, v61 +; SI-NEXT: v_mov_b32_e32 v61, v37 +; SI-NEXT: v_mov_b32_e32 v37, v53 +; SI-NEXT: v_mov_b32_e32 v51, v43 ; SI-NEXT: .LBB109_5: ; %end -; SI-NEXT: v_and_b32_e32 v48, 0xffff, v60 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v4, v48, v4 -; SI-NEXT: buffer_store_dword v4, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v57 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v58 -; SI-NEXT: v_or_b32_e32 v4, v4, v48 -; SI-NEXT: v_add_i32_e32 v48, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v4, v48, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v59 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v63 -; SI-NEXT: v_or_b32_e32 v4, v4, v48 -; SI-NEXT: v_add_i32_e32 v48, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v4, v48, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v46 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v47 -; SI-NEXT: v_or_b32_e32 v4, v4, v48 -; SI-NEXT: v_add_i32_e32 v48, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v4, v48, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v56 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v62 -; SI-NEXT: v_or_b32_e32 v4, v4, v48 -; SI-NEXT: v_add_i32_e32 v48, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v4, v48, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v30 -; SI-NEXT: v_or_b32_e32 v4, v4, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v4, v29, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v45 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v61 -; SI-NEXT: v_or_b32_e32 v4, v4, 
v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v4, v29, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v52 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v53 -; SI-NEXT: v_or_b32_e32 v4, v4, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v4, v29, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v41 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v4, v29, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v62 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v52 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v49 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v50 -; SI-NEXT: v_or_b32_e32 v4, v4, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v4, v29, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v27 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v49 +; SI-NEXT: v_and_b32_e32 
v3, 0xffff, v38 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v38 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v39 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v36 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v58 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v35 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v56 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v61 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v32 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v46 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: 
buffer_load_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v30 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v36 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v44 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v13 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v33 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v4, 
v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v50 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v20 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v22 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v21 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v17 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v4, 
v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v19 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v54 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v28 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v16 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v12 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, 
v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v27 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v12 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v12 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v44 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v26 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v9 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v12 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v43 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v24 -; SI-NEXT: v_or_b32_e32 v4, v4, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v31 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x6c, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v33 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v9 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, 
v21 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x70, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v41 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v48 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v51 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v28 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v7 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v24 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v63 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v20 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v11 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll index 9b28fd9e7b6fd..64b5ecc8f6b8e 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll @@ -1209,37 +1209,35 @@ define inreg <8 x i16> @bitcast_v4i32_to_v8i16_scalar(<4 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_lshr_b32 s10, s19, 16 +; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s10, s19, 16 +; SI-NEXT: s_lshr_b32 s11, s17, 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s6 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: v_mov_b32_e32 v3, s11 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: 
v_mov_b32_e32 v5, s4 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v7, s10 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v4i32_to_v8i16_scalar: @@ -3544,65 +3542,67 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB25_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s19, 24 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 8 -; SI-NEXT: s_lshr_b32 s9, s17, 24 -; SI-NEXT: s_lshr_b32 s10, s17, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 8 +; SI-NEXT: s_lshr_b32 s22, s19, 24 +; SI-NEXT: s_lshr_b32 s23, s19, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 8 +; SI-NEXT: s_lshr_b32 s25, s17, 24 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB25_3 ; SI-NEXT: .LBB25_2: ; %cmp.true -; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: 
v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s19, 24 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 8 -; SI-NEXT: s_lshr_b32 s9, s17, 24 -; SI-NEXT: s_lshr_b32 s10, s17, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8 +; SI-NEXT: s_lshr_b32 s22, s19, 24 +; SI-NEXT: s_lshr_b32 s23, s19, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 8 +; SI-NEXT: s_lshr_b32 s25, s17, 24 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s17, 8 ; SI-NEXT: .LBB25_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: v_mov_b32_e32 v3, s10 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s11 -; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: v_mov_b32_e32 v7, s9 +; SI-NEXT: v_mov_b32_e32 v5, s27 +; SI-NEXT: v_mov_b32_e32 v6, s26 +; SI-NEXT: v_mov_b32_e32 v7, s25 ; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s8 +; SI-NEXT: v_mov_b32_e32 v10, s6 +; SI-NEXT: v_mov_b32_e32 v11, s4 ; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v13, s8 -; SI-NEXT: v_mov_b32_e32 v14, s7 -; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v13, s24 +; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v15, s22 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB25_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; 
implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 ; SI-NEXT: s_branch .LBB25_2 ; ; VI-LABEL: bitcast_v4i32_to_v16i8_scalar: @@ -5664,36 +5664,41 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB37_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s11, s19, 16 +; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB37_4 ; SI-NEXT: .LBB37_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v11, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v10, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v9, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v8, s18, 1.0 +; SI-NEXT: v_lshr_b64 v[5:6], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[10:11], 16 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, 
v11 +; SI-NEXT: s_branch .LBB37_5 ; SI-NEXT: .LBB37_3: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: s_branch .LBB37_2 ; SI-NEXT: .LBB37_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v10, s16 +; SI-NEXT: v_mov_b32_e32 v11, s17 +; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s19 +; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v5, s4 +; SI-NEXT: v_mov_b32_e32 v1, s6 +; SI-NEXT: .LBB37_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v10 +; SI-NEXT: v_mov_b32_e32 v2, v11 +; SI-NEXT: v_mov_b32_e32 v4, v8 +; SI-NEXT: v_mov_b32_e32 v6, v9 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v4f32_to_v8i16_scalar: @@ -7997,64 +8002,75 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB49_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s9, s19, 24 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s24, s19, 24 +; SI-NEXT: s_lshr_b32 s26, s19, 16 +; SI-NEXT: s_lshr_b32 s27, s19, 8 +; SI-NEXT: s_lshr_b32 s22, s17, 24 +; SI-NEXT: s_lshr_b32 s23, s17, 16 +; SI-NEXT: 
s_lshr_b32 s25, s17, 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB49_4 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v4, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s18, 1.0 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v12 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v4 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v21, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v20, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v19, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v18, s18, 1.0 +; SI-NEXT: v_lshr_b64 v[0:1], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[11:12], v[18:19], 24 +; SI-NEXT: v_lshr_b64 v[16:17], v[18:19], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[18:19], 8 +; SI-NEXT: v_lshr_b64 v[3:4], v[20:21], 24 +; SI-NEXT: v_lshr_b64 v[1:2], v[20:21], 8 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v19 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v19 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v21 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v21 +; SI-NEXT: s_branch .LBB49_5 ; SI-NEXT: .LBB49_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 -; 
SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr24 ; SI-NEXT: s_branch .LBB49_2 ; SI-NEXT: .LBB49_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 -; SI-NEXT: v_mov_b32_e32 v13, s11 -; SI-NEXT: v_mov_b32_e32 v14, s10 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v20, s16 +; SI-NEXT: v_mov_b32_e32 v21, s17 +; SI-NEXT: v_mov_b32_e32 v18, s18 +; SI-NEXT: v_mov_b32_e32 v19, s19 +; SI-NEXT: v_mov_b32_e32 v5, s25 +; SI-NEXT: v_mov_b32_e32 v6, s23 +; SI-NEXT: v_mov_b32_e32 v7, s22 +; SI-NEXT: v_mov_b32_e32 v13, s27 +; SI-NEXT: v_mov_b32_e32 v14, s26 +; SI-NEXT: v_mov_b32_e32 v15, s24 +; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v16, s12 +; SI-NEXT: v_mov_b32_e32 v9, s14 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: .LBB49_5: ; %end +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: v_mov_b32_e32 v0, v20 +; SI-NEXT: v_mov_b32_e32 v4, v21 +; SI-NEXT: v_mov_b32_e32 v8, v18 +; SI-NEXT: v_mov_b32_e32 v10, v16 +; SI-NEXT: v_mov_b32_e32 v12, v19 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v4f32_to_v16i8_scalar: @@ -9769,37 +9785,35 @@ define inreg <8 x i16> @bitcast_v2i64_to_v8i16_scalar(<2 x i64> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB57_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: 
v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_lshr_b32 s10, s19, 16 +; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB57_3 ; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 ; SI-NEXT: s_add_u32 s18, s18, 3 ; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s10, s19, 16 +; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: .LBB57_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s6 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: v_mov_b32_e32 v3, s11 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s4 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v7, s10 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: s_branch .LBB57_2 ; ; VI-LABEL: bitcast_v2i64_to_v8i16_scalar: @@ -12106,65 +12120,67 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB69_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, 
s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s19, 24 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 8 -; SI-NEXT: s_lshr_b32 s9, s17, 24 -; SI-NEXT: s_lshr_b32 s10, s17, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 8 +; SI-NEXT: s_lshr_b32 s22, s19, 24 +; SI-NEXT: s_lshr_b32 s23, s19, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 8 +; SI-NEXT: s_lshr_b32 s25, s17, 24 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB69_3 ; SI-NEXT: .LBB69_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 ; SI-NEXT: s_add_u32 s18, s18, 3 ; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s19, 24 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 8 -; SI-NEXT: s_lshr_b32 s9, s17, 24 -; SI-NEXT: s_lshr_b32 s10, s17, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 8 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s22, s19, 24 +; SI-NEXT: s_lshr_b32 s23, s19, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 8 +; SI-NEXT: s_lshr_b32 s25, s17, 24 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: 
s_lshr_b32 s27, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8 ; SI-NEXT: .LBB69_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: v_mov_b32_e32 v3, s10 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s11 -; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: v_mov_b32_e32 v7, s9 +; SI-NEXT: v_mov_b32_e32 v5, s27 +; SI-NEXT: v_mov_b32_e32 v6, s26 +; SI-NEXT: v_mov_b32_e32 v7, s25 ; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s8 +; SI-NEXT: v_mov_b32_e32 v10, s6 +; SI-NEXT: v_mov_b32_e32 v11, s4 ; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v13, s8 -; SI-NEXT: v_mov_b32_e32 v14, s7 -; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v13, s24 +; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v15, s22 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB69_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 ; SI-NEXT: s_branch .LBB69_2 ; ; VI-LABEL: bitcast_v2i64_to_v16i8_scalar: @@ -13498,34 +13514,34 @@ define inreg <8 x i16> 
@bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB73_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s11, s19, 16 +; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB73_4 ; SI-NEXT: .LBB73_2: ; %cmp.true ; SI-NEXT: v_add_f64 v[8:9], s[18:19], 1.0 ; SI-NEXT: v_add_f64 v[10:11], s[16:17], 1.0 -; SI-NEXT: v_alignbit_b32 v5, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v1, v11, v10, 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[10:11], 16 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v9 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v11 ; SI-NEXT: s_branch .LBB73_5 ; SI-NEXT: .LBB73_3: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: s_branch .LBB73_2 ; SI-NEXT: .LBB73_4: -; SI-NEXT: v_mov_b32_e32 v11, s17 ; SI-NEXT: v_mov_b32_e32 v9, s19 -; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v11, s17 ; SI-NEXT: v_mov_b32_e32 v10, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v1, s6 +; SI-NEXT: v_mov_b32_e32 v5, s4 ; SI-NEXT: .LBB73_5: ; %end ; SI-NEXT: v_mov_b32_e32 v0, v10 ; SI-NEXT: v_mov_b32_e32 v2, v11 @@ -15789,67 +15805,73 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB85_3 ; SI-NEXT: ; %bb.1: ; 
%cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 8 -; SI-NEXT: s_lshr_b32 s8, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 8 +; SI-NEXT: s_lshr_b32 s27, s19, 24 +; SI-NEXT: s_lshr_b32 s26, s19, 16 +; SI-NEXT: s_lshr_b32 s25, s19, 8 +; SI-NEXT: s_lshr_b32 s24, s17, 24 +; SI-NEXT: s_lshr_b32 s23, s17, 16 +; SI-NEXT: s_lshr_b32 s22, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB85_4 ; SI-NEXT: .LBB85_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[16:17], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[18:19], s[16:17], 1.0 -; SI-NEXT: v_alignbit_b32 v11, v17, v16, 24 -; SI-NEXT: v_alignbit_b32 v10, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v9, v17, v16, 8 -; SI-NEXT: v_alignbit_b32 v3, v19, v18, 24 -; SI-NEXT: v_alignbit_b32 v2, v19, v18, 16 -; SI-NEXT: v_alignbit_b32 v1, v19, v18, 8 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v17 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v17 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v19 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v19 +; SI-NEXT: v_add_f64 v[20:21], s[16:17], 1.0 +; SI-NEXT: v_add_f64 v[18:19], s[18:19], 1.0 +; SI-NEXT: v_lshr_b64 v[0:1], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[11:12], v[18:19], 24 +; SI-NEXT: v_lshr_b64 v[16:17], v[18:19], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[18:19], 8 +; SI-NEXT: 
v_lshr_b64 v[3:4], v[20:21], 24 +; SI-NEXT: v_lshr_b64 v[1:2], v[20:21], 8 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v19 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v19 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v21 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v21 ; SI-NEXT: s_branch .LBB85_5 ; SI-NEXT: .LBB85_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: s_branch .LBB85_2 ; SI-NEXT: .LBB85_4: -; SI-NEXT: v_mov_b32_e32 v19, s17 -; SI-NEXT: v_mov_b32_e32 v17, s19 -; SI-NEXT: v_mov_b32_e32 v16, s18 -; SI-NEXT: v_mov_b32_e32 v18, s16 -; SI-NEXT: v_mov_b32_e32 v7, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v5, s6 -; SI-NEXT: v_mov_b32_e32 v15, s11 -; SI-NEXT: v_mov_b32_e32 v14, s10 -; SI-NEXT: v_mov_b32_e32 v13, s9 +; SI-NEXT: v_mov_b32_e32 v19, s19 +; SI-NEXT: v_mov_b32_e32 v21, s17 +; SI-NEXT: v_mov_b32_e32 v20, s16 +; SI-NEXT: v_mov_b32_e32 v18, s18 +; SI-NEXT: v_mov_b32_e32 v15, s27 +; SI-NEXT: v_mov_b32_e32 v14, s26 +; SI-NEXT: v_mov_b32_e32 v13, s25 +; SI-NEXT: v_mov_b32_e32 v7, s24 +; SI-NEXT: v_mov_b32_e32 v6, s23 +; SI-NEXT: v_mov_b32_e32 v5, s22 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v0, 
s12 +; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v9, s8 +; SI-NEXT: v_mov_b32_e32 v16, s6 +; SI-NEXT: v_mov_b32_e32 v11, s4 ; SI-NEXT: .LBB85_5: ; %end -; SI-NEXT: v_mov_b32_e32 v0, v18 -; SI-NEXT: v_mov_b32_e32 v4, v19 -; SI-NEXT: v_mov_b32_e32 v8, v16 -; SI-NEXT: v_mov_b32_e32 v12, v17 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: v_mov_b32_e32 v0, v20 +; SI-NEXT: v_mov_b32_e32 v4, v21 +; SI-NEXT: v_mov_b32_e32 v8, v18 +; SI-NEXT: v_mov_b32_e32 v10, v16 +; SI-NEXT: v_mov_b32_e32 v12, v19 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v2f64_to_v16i8_scalar: @@ -17515,11 +17537,11 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: s_cmp_lg_u32 s24, 0 @@ -17530,8 +17552,8 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 @@ -17545,10 +17567,10 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: 
v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v7 @@ -17557,11 +17579,13 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_or_b32_e32 v2, v2, v8 +; SI-NEXT: v_lshr_b64 v[10:11], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[8:9], v[5:6], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 ; SI-NEXT: .LBB91_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v10 +; SI-NEXT: v_mov_b32_e32 v5, v8 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB91_4: ; SI-NEXT: s_branch .LBB91_2 @@ -18405,60 +18429,62 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3 ; SI-NEXT: v_mul_f32_e64 v15, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v14, 1.0, s17 ; SI-NEXT: v_mul_f32_e64 v9, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v8, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 ; SI-NEXT: v_mul_f32_e64 v13, 1.0, s20 ; SI-NEXT: v_mul_f32_e64 v12, 1.0, s21 ; SI-NEXT: v_mul_f32_e64 v11, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v10, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v5, 1.0, s23 ; SI-NEXT: s_cbranch_scc0 .LBB95_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v14 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v12 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 ; SI-NEXT: s_cbranch_execnz .LBB95_3 ; SI-NEXT: .LBB95_2: ; %cmp.true 
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v14 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v14 ; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v15 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v12 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 ; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v12 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v10 -; SI-NEXT: v_alignbit_b32 v4, v4, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v11 +; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v11 ; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v2 +; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v8 -; SI-NEXT: v_alignbit_b32 v6, v7, v2, 16 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_alignbit_b32 v6, v7, v3, 16 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; SI-NEXT: v_lshr_b64 v[10:11], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[8:9], v[5:6], 16 +; SI-NEXT: v_alignbit_b32 
v4, v12, v13, 16 ; SI-NEXT: .LBB95_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v10 +; SI-NEXT: v_mov_b32_e32 v5, v8 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB95_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $vgpr8 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: s_branch .LBB95_2 @@ -19152,30 +19178,28 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: v_alignbit_b32 v3, s8, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s8, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s8, v0, 8 -; SI-NEXT: s_or_b32 s9, s4, s5 -; SI-NEXT: v_mov_b32_e32 v0, s7 -; SI-NEXT: v_alignbit_b32 v11, s9, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s9, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s9, v0, 8 -; SI-NEXT: s_lshr_b32 s10, s8, 8 -; SI-NEXT: s_lshr_b32 s13, s9, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 24 +; SI-NEXT: s_and_b32 s6, s20, 0xffff +; SI-NEXT: s_lshl_b32 s7, s21, 16 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xffff +; SI-NEXT: s_lshl_b32 s9, s23, 16 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 
24 +; SI-NEXT: s_lshr_b64 s[24:25], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[6:7], 8 +; SI-NEXT: s_lshr_b32 s9, s5, 8 +; SI-NEXT: s_lshr_b32 s15, s7, 8 ; SI-NEXT: s_and_b32 s11, s19, 0xffff -; SI-NEXT: s_and_b32 s14, s23, 0xffff -; SI-NEXT: s_bfe_u32 s12, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s15, s23, 0x80008 +; SI-NEXT: s_and_b32 s25, s23, 0xffff +; SI-NEXT: s_bfe_u32 s13, s19, 0x80008 +; SI-NEXT: s_bfe_u32 s27, s23, 0x80008 ; SI-NEXT: s_cbranch_execnz .LBB97_3 ; SI-NEXT: .LBB97_2: ; %cmp.true ; SI-NEXT: s_add_i32 s20, s20, 3 @@ -19183,64 +19207,66 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in ; SI-NEXT: s_lshl_b32 s5, s21, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 +; SI-NEXT: s_add_i32 s6, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s22, 0xffff ; SI-NEXT: s_lshl_b32 s5, s23, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x30000 +; SI-NEXT: s_add_i32 s7, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v3, s8, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s8, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s8, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s7 -; SI-NEXT: v_alignbit_b32 v11, s9, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s9, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s9, v0, 8 -; SI-NEXT: s_lshr_b32 s12, s8, 24 -; SI-NEXT: s_lshr_b32 s11, s8, 16 -; SI-NEXT: s_lshr_b32 s10, s8, 8 -; SI-NEXT: s_lshr_b32 s15, s9, 24 -; SI-NEXT: s_lshr_b32 s14, s9, 16 -; SI-NEXT: s_lshr_b32 s13, s9, 8 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s8, s19, 16 +; SI-NEXT: s_or_b32 s5, s8, s5 +; SI-NEXT: s_add_i32 
s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[24:25], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[6:7], 8 +; SI-NEXT: s_lshr_b32 s13, s5, 24 +; SI-NEXT: s_lshr_b32 s11, s5, 16 +; SI-NEXT: s_lshr_b32 s9, s5, 8 +; SI-NEXT: s_lshr_b32 s27, s7, 24 +; SI-NEXT: s_lshr_b32 s25, s7, 16 +; SI-NEXT: s_lshr_b32 s15, s7, 8 ; SI-NEXT: .LBB97_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_mov_b32_e32 v4, s8 -; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v4, s5 +; SI-NEXT: v_mov_b32_e32 v5, s9 ; SI-NEXT: v_mov_b32_e32 v6, s11 -; SI-NEXT: v_mov_b32_e32 v7, s12 -; SI-NEXT: v_mov_b32_e32 v8, s7 -; SI-NEXT: v_mov_b32_e32 v12, s9 -; SI-NEXT: v_mov_b32_e32 v13, s13 -; SI-NEXT: v_mov_b32_e32 v14, s14 -; SI-NEXT: v_mov_b32_e32 v15, s15 +; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v8, s6 +; SI-NEXT: v_mov_b32_e32 v9, s26 +; SI-NEXT: v_mov_b32_e32 v10, s24 +; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v12, s7 +; SI-NEXT: v_mov_b32_e32 v13, s15 +; SI-NEXT: v_mov_b32_e32 v14, s25 +; SI-NEXT: v_mov_b32_e32 v15, s27 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB97_4: -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; 
implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr24 ; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: s_branch .LBB97_2 ; ; VI-LABEL: bitcast_v8i16_to_v16i8_scalar: @@ -20067,53 +20093,53 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 -; SI-NEXT: v_readfirstlane_b32 s6, v1 +; SI-NEXT: v_readfirstlane_b32 s14, v1 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s8, v0 +; SI-NEXT: v_readfirstlane_b32 s15, v0 ; SI-NEXT: s_cbranch_scc0 .LBB99_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s7, s23, 24 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s7, s5 -; SI-NEXT: s_or_b32 s10, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 -; SI-NEXT: s_lshl_b32 s7, s19, 24 -; SI-NEXT: s_or_b32 s4, s7, s4 -; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_or_b32 s40, s6, s5 +; SI-NEXT: s_or_b32 s6, s4, s40 +; SI-NEXT: s_and_b32 s4, s24, 0xff +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s26, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s27, 24 +; SI-NEXT: s_or_b32 s42, s7, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; 
SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_or_b32 s41, s5, s7 +; SI-NEXT: s_lshr_b64 s[8:9], s[40:41], 16 +; SI-NEXT: s_and_b32 s5, s28, 0xff ; SI-NEXT: s_lshl_b32 s9, s29, 8 -; SI-NEXT: s_or_b32 s7, s7, s9 -; SI-NEXT: s_and_b32 s9, s8, 0xff -; SI-NEXT: s_lshl_b32 s9, s9, 16 -; SI-NEXT: s_lshl_b32 s11, s6, 24 -; SI-NEXT: s_or_b32 s13, s11, s9 -; SI-NEXT: s_and_b32 s9, s26, 0xff +; SI-NEXT: s_or_b32 s5, s5, s9 +; SI-NEXT: s_and_b32 s9, s15, 0xff ; SI-NEXT: s_lshl_b32 s9, s9, 16 -; SI-NEXT: s_lshl_b32 s11, s27, 24 -; SI-NEXT: s_or_b32 s9, s11, s9 -; SI-NEXT: s_and_b32 s11, s16, 0xff -; SI-NEXT: s_lshl_b32 s12, s17, 8 -; SI-NEXT: s_or_b32 s11, s11, s12 -; SI-NEXT: s_and_b32 s11, s11, 0xffff -; SI-NEXT: v_mov_b32_e32 v0, s4 -; SI-NEXT: s_or_b32 s11, s11, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s12, s25, 8 -; SI-NEXT: s_and_b32 s7, s7, 0xffff -; SI-NEXT: s_or_b32 s4, s4, s12 -; SI-NEXT: v_alignbit_b32 v1, s10, v0, 16 -; SI-NEXT: s_or_b32 s7, s7, s13 -; SI-NEXT: v_mov_b32_e32 v0, s9 +; SI-NEXT: s_lshl_b32 s10, s14, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s12, s10, s9 +; SI-NEXT: s_or_b32 s43, s5, s12 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_alignbit_b32 v5, s7, v0, 16 -; SI-NEXT: s_or_b32 s9, s4, s9 -; SI-NEXT: s_lshr_b32 s12, s5, 16 -; SI-NEXT: s_lshr_b32 s13, s13, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[42:43], 16 +; SI-NEXT: s_or_b32 s4, s4, s42 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s12, 16 +; SI-NEXT: s_mov_b32 s7, s41 +; SI-NEXT: s_mov_b32 s5, s43 ; SI-NEXT: s_cbranch_execnz .LBB99_3 ; SI-NEXT: .LBB99_2: ; %cmp.true ; SI-NEXT: s_add_i32 s24, s24, 3 @@ -20121,76 +20147,74 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in ; SI-NEXT: s_lshl_b32 s5, s25, 8 ; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s7, s26, 0xff +; SI-NEXT: s_and_b32 s6, s26, 0xff ; 
SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s27, 24 -; SI-NEXT: s_lshl_b32 s7, s7, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s7 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: s_add_i32 s8, s8, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_lshl_b32 s5, s6, 24 -; SI-NEXT: s_and_b32 s6, s8, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_add_i32 s28, s28, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s6, s29, 8 +; SI-NEXT: s_add_i32 s15, s15, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s7, s15, 0xff +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s14, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_lshl_b32 s5, s17, 8 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s16, 0xff +; SI-NEXT: s_lshl_b32 s7, s17, 8 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s18, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s8, s18, 0xff +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_lshl_b32 s7, s19, 24 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s11, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: 
s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s8, s21, 8 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s22, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s10, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s11 -; SI-NEXT: v_alignbit_b32 v1, s10, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s9 -; SI-NEXT: v_alignbit_b32 v5, s7, v0, 16 -; SI-NEXT: s_lshr_b32 s12, s10, 16 -; SI-NEXT: s_lshr_b32 s13, s7, 16 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s9, s22, 0xff +; SI-NEXT: s_addk_i32 s7, 0x300 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_add_i32 s6, s6, 0x3000000 +; SI-NEXT: s_add_i32 s7, s7, 0x3000000 +; SI-NEXT: s_lshr_b64 s[8:9], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s5, 16 ; SI-NEXT: .LBB99_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s11 -; SI-NEXT: v_mov_b32_e32 v2, s10 -; SI-NEXT: v_mov_b32_e32 v3, s12 -; SI-NEXT: v_mov_b32_e32 v4, s9 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v6, s5 +; SI-NEXT: v_mov_b32_e32 v7, s11 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB99_4: -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; 
implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: s_branch .LBB99_2 ; ; VI-LABEL: bitcast_v16i8_to_v8i16_scalar: @@ -22076,41 +22100,41 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i ; SI-LABEL: bitcast_v8f16_to_v16i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v18, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v17, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v16, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v26, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v25, s20 ; SI-NEXT: v_cvt_f16_f32_e32 v14, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s22 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB105_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v21 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v18 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v8, v20, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v14 -; SI-NEXT: v_or_b32_e32 v0, v17, v0 -; SI-NEXT: v_or_b32_e32 v4, v16, v1 -; SI-NEXT: v_or_b32_e32 v12, v19, v5 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v23 +; SI-NEXT: v_or_b32_e32 v19, v16, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: v_or_b32_e32 v20, v8, v0 +; SI-NEXT: v_lshr_b64 
v[0:1], v[19:20], 16 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v26 +; SI-NEXT: v_or_b32_e32 v17, v25, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v14 +; SI-NEXT: v_or_b32_e32 v18, v24, v1 +; SI-NEXT: v_lshr_b64 v[3:4], v[19:20], 24 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v20 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v18 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v15, v14, 8, 8 +; SI-NEXT: v_lshr_b64 v[1:2], v[19:20], 8 +; SI-NEXT: v_lshr_b64 v[11:12], v[17:18], 24 +; SI-NEXT: v_lshr_b64 v[21:22], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[17:18], 8 ; SI-NEXT: s_cbranch_execnz .LBB105_3 ; SI-NEXT: .LBB105_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v0, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v24 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 @@ -22120,13 +22144,13 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v8, v1, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v18 +; SI-NEXT: v_or_b32_e32 v17, v1, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v23 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v14 -; SI-NEXT: v_or_b32_e32 v12, v2, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v17 +; SI-NEXT: v_or_b32_e32 v18, v2, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v8 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 @@ -22136,34 +22160,38 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 
v2, v2 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v4, v2, v1 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 +; SI-NEXT: v_or_b32_e32 v19, v1, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: v_or_b32_e32 v20, v2, v0 +; SI-NEXT: v_lshr_b64 v[0:1], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[19:20], 24 +; SI-NEXT: v_lshr_b64 v[1:2], v[19:20], 8 +; SI-NEXT: v_lshr_b64 v[11:12], v[17:18], 24 +; SI-NEXT: v_lshr_b64 v[21:22], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[17:18], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v20 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v18 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v15, v14, 8, 8 ; SI-NEXT: .LBB105_3: ; %end +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: v_mov_b32_e32 v0, v19 +; SI-NEXT: v_mov_b32_e32 v4, v20 +; SI-NEXT: v_mov_b32_e32 v8, v17 +; SI-NEXT: v_mov_b32_e32 v10, v21 +; SI-NEXT: v_mov_b32_e32 v12, v18 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB105_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr0 ; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr15 ; SI-NEXT: s_branch .LBB105_2 @@ -24073,89 
+24101,94 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_cmp_lg_u32 s24, 0 -; SI-NEXT: v_mul_f32_e64 v18, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v19, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v16, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v17, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v22, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v23, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v20, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v21, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v16, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v0, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v25, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v26, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v23, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v24, 1.0, s22 ; SI-NEXT: s_cbranch_scc0 .LBB109_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v20 -; SI-NEXT: v_alignbit_b32 v0, v0, v19, 16 -; SI-NEXT: v_alignbit_b32 v4, v6, v17, 16 -; SI-NEXT: v_alignbit_b32 v8, v5, v23, 16 -; SI-NEXT: v_alignbit_b32 v12, v14, v21, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v16 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v20 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; SI-NEXT: v_alignbit_b32 v19, v1, v16, 16 +; SI-NEXT: v_alignbit_b32 v20, v6, v8, 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[19:20], 8 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v23 +; SI-NEXT: v_lshr_b64 v[3:4], v[19:20], 
24 +; SI-NEXT: v_alignbit_b32 v21, v2, v26, 16 +; SI-NEXT: v_alignbit_b32 v22, v14, v24, 16 +; SI-NEXT: v_lshr_b64 v[4:5], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[10:11], v[21:22], 16 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v0 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v20 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v23 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v22 +; SI-NEXT: v_lshr_b64 v[17:18], v[21:22], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[21:22], 8 ; SI-NEXT: s_cbranch_execnz .LBB109_3 ; SI-NEXT: .LBB109_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v22 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v23 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v25 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v26 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_alignbit_b32 v21, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v23 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v24 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v8, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v20 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v21 -; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v15 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v18 -; SI-NEXT: v_alignbit_b32 v12, v14, v0, 16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v19 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 +; SI-NEXT: v_alignbit_b32 v22, v14, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; SI-NEXT: v_alignbit_b32 v19, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: 
v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v16 -; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v17 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v7 -; SI-NEXT: v_alignbit_b32 v4, v6, v1, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v7 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; SI-NEXT: v_alignbit_b32 v20, v6, v1, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[19:20], 24 +; SI-NEXT: v_lshr_b64 v[10:11], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[4:5], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[19:20], 8 +; SI-NEXT: v_lshr_b64 v[17:18], v[21:22], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[21:22], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v20 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v22 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v0 ; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v15 ; SI-NEXT: .LBB109_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v19 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v20 +; SI-NEXT: v_mov_b32_e32 v8, v21 +; SI-NEXT: v_mov_b32_e32 v9, v11 +; SI-NEXT: v_mov_b32_e32 v11, v17 +; SI-NEXT: v_mov_b32_e32 v12, v22 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB109_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: 
$vgpr10 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: s_branch .LBB109_2 ; ; VI-LABEL: bitcast_v8bf16_to_v16i8_scalar: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll index c87d52c1e6907..ee209f84efe7c 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll @@ -520,44 +520,41 @@ define inreg <10 x i16> @bitcast_v5i32_to_v10i16_scalar(<5 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s21, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_lshr_b32 s12, s19, 16 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_mov_b32_e32 v0, s18 ; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s19, 16 -; SI-NEXT: s_lshr_b32 s7, s17, 16 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: 
s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s12, s19, 16 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s8 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: v_mov_b32_e32 v3, s13 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s4 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v7, s12 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s6 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v5i32_to_v10i16_scalar: @@ -1731,42 +1728,47 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s21, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_4 ; SI-NEXT: .LBB13_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; 
SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s4, v8, 16 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v14, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v13, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v12, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v11, s18, 1.0 +; SI-NEXT: v_lshr_b64 v[5:6], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[8:9], 16 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v14 +; SI-NEXT: s_branch .LBB13_5 ; SI-NEXT: .LBB13_3: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: s_branch .LBB13_2 ; SI-NEXT: .LBB13_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 +; SI-NEXT: v_mov_b32_e32 v13, s16 +; SI-NEXT: v_mov_b32_e32 v14, s17 +; SI-NEXT: v_mov_b32_e32 v11, s18 +; SI-NEXT: v_mov_b32_e32 v12, s19 ; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v9, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v5, s4 +; SI-NEXT: .LBB13_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v13 +; SI-NEXT: v_mov_b32_e32 v2, v14 +; SI-NEXT: v_mov_b32_e32 v4, v11 +; SI-NEXT: v_mov_b32_e32 v6, v12 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v5f32_to_v10i16_scalar: @@ -3319,11 +3321,11 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 @@ -3333,49 +3335,51 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB23_3 ; SI-NEXT: .LBB23_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; 
SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v9 -; SI-NEXT: v_or_b32_e32 v8, v8, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_or_b32_e32 v6, v6, v10 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_or_b32_e32 v2, v2, v10 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshr_b64 v[10:11], v[1:2], 16 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v9 +; SI-NEXT: v_lshr_b64 v[11:12], v[5:6], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 +; SI-NEXT: v_or_b32_e32 v8, v8, v13 ; SI-NEXT: .LBB23_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v10 +; SI-NEXT: v_mov_b32_e32 v5, v11 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB23_4: ; SI-NEXT: s_branch .LBB23_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll index c3ace0ac5af71..57eae8600dc4a 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll @@ -1360,50 +1360,47 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s22, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: 
s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b32 s12, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s12, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s8 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v3, s14 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s6 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v7, s13 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s4 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s6 +; SI-NEXT: v_mov_b32_e32 v11, s12 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: 
$vgpr9 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v6i32_to_v12i16_scalar: @@ -3505,48 +3502,55 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s22, 0 ; SI-NEXT: s_cbranch_scc0 .LBB29_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_4 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v17, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v16, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v15, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v14, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v13, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v12, s20, 1.0 +; SI-NEXT: v_lshr_b64 v[9:10], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[14:15], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[16:17], 
16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v17 +; SI-NEXT: s_branch .LBB29_5 ; SI-NEXT: .LBB29_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: s_branch .LBB29_2 ; SI-NEXT: .LBB29_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v16, s16 +; SI-NEXT: v_mov_b32_e32 v17, s17 +; SI-NEXT: v_mov_b32_e32 v14, s18 +; SI-NEXT: v_mov_b32_e32 v15, s19 +; SI-NEXT: v_mov_b32_e32 v12, s20 +; SI-NEXT: v_mov_b32_e32 v13, s21 +; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v5, s6 +; SI-NEXT: v_mov_b32_e32 v9, s4 +; SI-NEXT: .LBB29_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v16 +; SI-NEXT: v_mov_b32_e32 v2, v17 +; SI-NEXT: v_mov_b32_e32 v4, v14 +; SI-NEXT: v_mov_b32_e32 v6, v15 +; SI-NEXT: v_mov_b32_e32 v8, v12 +; SI-NEXT: v_mov_b32_e32 v10, v13 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v6f32_to_v12i16_scalar: @@ -5249,50 +5253,47 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s22, 0 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, 
v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b32 s12, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 ; SI-NEXT: s_add_u32 s20, s20, 3 ; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s12, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s8 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v3, s14 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s6 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v7, s13 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s4 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s6 +; SI-NEXT: v_mov_b32_e32 v11, s12 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: 
-; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v3i64_to_v12i16_scalar: @@ -6578,45 +6579,45 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i ; SI-NEXT: s_cmp_lg_u32 s22, 0 ; SI-NEXT: s_cbranch_scc0 .LBB49_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s21, 16 +; SI-NEXT: s_lshr_b32 s13, s19, 16 +; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_4 ; SI-NEXT: .LBB49_2: ; %cmp.true ; SI-NEXT: v_add_f64 v[16:17], s[16:17], 1.0 ; SI-NEXT: v_add_f64 v[12:13], s[20:21], 1.0 ; SI-NEXT: v_add_f64 v[14:15], s[18:19], 1.0 -; SI-NEXT: v_alignbit_b32 v9, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v5, v15, v14, 16 -; SI-NEXT: v_alignbit_b32 v1, v17, v16, 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[14:15], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[16:17], 16 ; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v15 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v17 ; SI-NEXT: s_branch .LBB49_5 ; SI-NEXT: .LBB49_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; 
SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: s_branch .LBB49_2 ; SI-NEXT: .LBB49_4: -; SI-NEXT: v_mov_b32_e32 v17, s17 ; SI-NEXT: v_mov_b32_e32 v16, s16 -; SI-NEXT: v_mov_b32_e32 v15, s19 ; SI-NEXT: v_mov_b32_e32 v14, s18 -; SI-NEXT: v_mov_b32_e32 v13, s21 ; SI-NEXT: v_mov_b32_e32 v12, s20 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v17, s17 +; SI-NEXT: v_mov_b32_e32 v15, s19 +; SI-NEXT: v_mov_b32_e32 v13, s21 +; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v5, s6 +; SI-NEXT: v_mov_b32_e32 v9, s4 ; SI-NEXT: .LBB49_5: ; %end ; SI-NEXT: v_mov_b32_e32 v0, v16 ; SI-NEXT: v_mov_b32_e32 v2, v17 @@ -8296,15 +8297,15 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s25 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: s_cmp_lg_u32 s28, 0 @@ -8317,53 +8318,56 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: 
v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v12 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v10, v10, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v12 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v7 ; SI-NEXT: v_or_b32_e32 v6, v6, v12 ; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v3 ; SI-NEXT: 
v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v2, v2, v12 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v2, v2, v12 +; SI-NEXT: v_lshr_b64 v[14:15], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[15:16], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[12:13], v[9:10], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 ; SI-NEXT: .LBB59_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v14 +; SI-NEXT: v_mov_b32_e32 v5, v15 +; SI-NEXT: v_mov_b32_e32 v9, v12 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: ; SI-NEXT: s_branch .LBB59_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll index c830d6b344b6f..7d0897bb2151b 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll @@ -585,57 +585,53 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s23, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b32 s23, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s21, 16 +; SI-NEXT: s_lshr_b32 s15, s19, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; 
SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s4, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_lshr_b32 s23, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s14, s21, 16 +; SI-NEXT: s_lshr_b32 s15, s19, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s10 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v3, s23 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s8 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s7 +; SI-NEXT: v_mov_b32_e32 v7, s15 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s4 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s6 +; SI-NEXT: v_mov_b32_e32 v11, s14 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s6 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr23 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; 
SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v7i32_to_v14i16_scalar: @@ -2048,54 +2044,61 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s23, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s23, s21, 16 +; SI-NEXT: s_lshr_b32 s15, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_4 ; SI-NEXT: .LBB13_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_alignbit_b32 v13, s4, v12, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v20, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v19, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v18, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v17, s18, 1.0 +; SI-NEXT: 
v_add_f32_e64 v16, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v15, s20, 1.0 +; SI-NEXT: v_lshr_b64 v[9:10], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[12:13], 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v20 +; SI-NEXT: s_branch .LBB13_5 ; SI-NEXT: .LBB13_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr6 ; SI-NEXT: s_branch .LBB13_2 ; SI-NEXT: .LBB13_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 +; SI-NEXT: v_mov_b32_e32 v19, s16 +; SI-NEXT: v_mov_b32_e32 v20, s17 +; SI-NEXT: v_mov_b32_e32 v17, s18 +; SI-NEXT: v_mov_b32_e32 v18, s19 +; SI-NEXT: v_mov_b32_e32 v15, s20 +; SI-NEXT: v_mov_b32_e32 v16, s21 ; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v3, s14 +; SI-NEXT: v_mov_b32_e32 v7, s15 +; SI-NEXT: v_mov_b32_e32 v11, s23 +; SI-NEXT: v_mov_b32_e32 v13, s6 +; SI-NEXT: v_mov_b32_e32 v1, s10 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: v_mov_b32_e32 v9, s4 +; SI-NEXT: .LBB13_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v19 +; SI-NEXT: v_mov_b32_e32 v2, v20 +; SI-NEXT: v_mov_b32_e32 v4, v17 +; SI-NEXT: v_mov_b32_e32 v6, v18 +; SI-NEXT: v_mov_b32_e32 v8, v15 +; SI-NEXT: v_mov_b32_e32 v10, v16 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: 
bitcast_v7f32_to_v14i16_scalar: @@ -3965,22 +3968,21 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i ; SI-LABEL: bitcast_v14f16_to_v14i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v14, v0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s25 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 ; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB23_4 ; SI-NEXT: ; %bb.1: ; %cmp.false @@ -3993,17 +3995,14 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v14 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 @@ 
-4012,40 +4011,46 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v14 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_or_b32_e32 v10, v10, v14 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v7 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_or_b32_e32 v6, v6, v14 ; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_or_b32_e32 v2, v2, v14 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshr_b64 v[14:15], v[1:2], 16 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v2, v2, v14 +; SI-NEXT: v_lshr_b64 v[15:16], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[16:17], v[9:10], 16 ; 
SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 ; SI-NEXT: .LBB23_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v14 +; SI-NEXT: v_mov_b32_e32 v5, v15 +; SI-NEXT: v_mov_b32_e32 v9, v16 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB23_4: ; SI-NEXT: s_branch .LBB23_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll index 52e125d0d658f..cb4b3bd4382a4 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll @@ -1514,63 +1514,59 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s23, 16 +; SI-NEXT: s_lshr_b32 s15, s21, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 
16 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s14, s23, 16 +; SI-NEXT: s_lshr_b32 s15, s21, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s10 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v3, s25 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s8 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s8 +; SI-NEXT: v_mov_b32_e32 v7, s24 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s6 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s7 +; SI-NEXT: v_mov_b32_e32 v11, s15 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s4 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v15, s14 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 
-; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr24 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v8i32_to_v16i16_scalar: @@ -5255,119 +5251,123 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB25_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s23, 24 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 8 -; SI-NEXT: s_lshr_b32 s8, s21, 24 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s12, s21, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s13, s19, 16 -; SI-NEXT: s_lshr_b32 s14, s19, 8 -; SI-NEXT: s_lshr_b32 s15, s17, 24 -; SI-NEXT: s_lshr_b32 s24, s17, 16 -; SI-NEXT: s_lshr_b32 s25, s17, 8 +; SI-NEXT: s_lshr_b32 s56, s23, 24 +; SI-NEXT: s_lshr_b32 s57, s23, 16 +; SI-NEXT: s_lshr_b32 s58, s23, 8 +; SI-NEXT: s_lshr_b32 s59, s21, 24 +; SI-NEXT: s_lshr_b32 s60, s21, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 8 +; SI-NEXT: s_lshr_b32 s62, s19, 24 +; SI-NEXT: s_lshr_b32 s63, s19, 16 +; SI-NEXT: s_lshr_b32 s72, s19, 8 +; SI-NEXT: s_lshr_b32 s73, s17, 24 +; SI-NEXT: s_lshr_b32 s74, s17, 
16 +; SI-NEXT: s_lshr_b32 s75, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB25_3 ; SI-NEXT: .LBB25_2: ; %cmp.true -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s23, 24 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 8 -; SI-NEXT: s_lshr_b32 s8, s21, 24 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s12, s21, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s13, s19, 16 -; SI-NEXT: s_lshr_b32 s14, s19, 8 -; SI-NEXT: s_lshr_b32 s15, s17, 24 -; SI-NEXT: s_lshr_b32 s24, s17, 16 
-; SI-NEXT: s_lshr_b32 s25, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8 +; SI-NEXT: s_lshr_b32 s56, s23, 24 +; SI-NEXT: s_lshr_b32 s57, s23, 16 +; SI-NEXT: s_lshr_b32 s58, s23, 8 +; SI-NEXT: s_lshr_b32 s59, s21, 24 +; SI-NEXT: s_lshr_b32 s60, s21, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 8 +; SI-NEXT: s_lshr_b32 s62, s19, 24 +; SI-NEXT: s_lshr_b32 s63, s19, 16 +; SI-NEXT: s_lshr_b32 s72, s19, 8 +; SI-NEXT: s_lshr_b32 s73, s17, 24 +; SI-NEXT: s_lshr_b32 s74, s17, 16 +; SI-NEXT: s_lshr_b32 s75, s17, 8 ; SI-NEXT: .LBB25_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s44 +; SI-NEXT: v_mov_b32_e32 v2, s42 +; SI-NEXT: v_mov_b32_e32 v3, s40 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s25 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s15 +; SI-NEXT: v_mov_b32_e32 v5, s75 +; SI-NEXT: v_mov_b32_e32 v6, s74 +; SI-NEXT: v_mov_b32_e32 v7, s73 ; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s28 +; SI-NEXT: v_mov_b32_e32 v10, s26 +; SI-NEXT: v_mov_b32_e32 v11, s24 ; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v13, s14 -; SI-NEXT: v_mov_b32_e32 v14, s13 -; SI-NEXT: v_mov_b32_e32 v15, s11 +; SI-NEXT: v_mov_b32_e32 v13, s72 +; SI-NEXT: v_mov_b32_e32 v14, s63 +; SI-NEXT: v_mov_b32_e32 v15, s62 ; SI-NEXT: v_mov_b32_e32 v16, s20 +; 
SI-NEXT: v_mov_b32_e32 v17, s14 +; SI-NEXT: v_mov_b32_e32 v18, s12 +; SI-NEXT: v_mov_b32_e32 v19, s10 ; SI-NEXT: v_mov_b32_e32 v20, s21 -; SI-NEXT: v_mov_b32_e32 v21, s12 -; SI-NEXT: v_mov_b32_e32 v22, s10 -; SI-NEXT: v_mov_b32_e32 v23, s8 +; SI-NEXT: v_mov_b32_e32 v21, s61 +; SI-NEXT: v_mov_b32_e32 v22, s60 +; SI-NEXT: v_mov_b32_e32 v23, s59 ; SI-NEXT: v_mov_b32_e32 v24, s22 +; SI-NEXT: v_mov_b32_e32 v25, s8 +; SI-NEXT: v_mov_b32_e32 v26, s6 +; SI-NEXT: v_mov_b32_e32 v27, s4 ; SI-NEXT: v_mov_b32_e32 v28, s23 -; SI-NEXT: v_mov_b32_e32 v29, s9 -; SI-NEXT: v_mov_b32_e32 v30, s7 -; SI-NEXT: v_mov_b32_e32 v31, s6 +; SI-NEXT: v_mov_b32_e32 v29, s58 +; SI-NEXT: v_mov_b32_e32 v30, s57 +; SI-NEXT: v_mov_b32_e32 v31, s56 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB25_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr24 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr62 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr59 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; 
SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: s_branch .LBB25_2 ; ; VI-LABEL: bitcast_v8i32_to_v32i8_scalar: @@ -8503,60 +8503,69 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB37_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s25, s23, 16 +; SI-NEXT: s_lshr_b32 s24, s21, 16 +; SI-NEXT: s_lshr_b32 s15, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB37_4 ; SI-NEXT: .LBB37_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: 
v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v23, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v22, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v21, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v20, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v19, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v18, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v17, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v16, s22, 1.0 +; SI-NEXT: v_lshr_b64 v[13:14], v[16:17], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[18:19], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[22:23], 16 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v23 +; SI-NEXT: s_branch .LBB37_5 ; SI-NEXT: .LBB37_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: s_branch .LBB37_2 ; SI-NEXT: .LBB37_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v22, s16 +; SI-NEXT: v_mov_b32_e32 v23, s17 +; SI-NEXT: v_mov_b32_e32 v20, s18 +; SI-NEXT: v_mov_b32_e32 v21, s19 +; SI-NEXT: v_mov_b32_e32 v18, s20 +; SI-NEXT: 
v_mov_b32_e32 v19, s21 +; SI-NEXT: v_mov_b32_e32 v16, s22 +; SI-NEXT: v_mov_b32_e32 v17, s23 +; SI-NEXT: v_mov_b32_e32 v3, s14 +; SI-NEXT: v_mov_b32_e32 v7, s15 +; SI-NEXT: v_mov_b32_e32 v11, s24 +; SI-NEXT: v_mov_b32_e32 v15, s25 +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: v_mov_b32_e32 v9, s6 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: v_mov_b32_e32 v1, s10 +; SI-NEXT: .LBB37_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v22 +; SI-NEXT: v_mov_b32_e32 v2, v23 +; SI-NEXT: v_mov_b32_e32 v4, v20 +; SI-NEXT: v_mov_b32_e32 v6, v21 +; SI-NEXT: v_mov_b32_e32 v8, v18 +; SI-NEXT: v_mov_b32_e32 v10, v19 +; SI-NEXT: v_mov_b32_e32 v12, v16 +; SI-NEXT: v_mov_b32_e32 v14, v17 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v8f32_to_v16i16_scalar: @@ -12246,116 +12255,137 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB49_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s15, s23, 24 -; SI-NEXT: s_lshr_b32 s24, s23, 16 -; SI-NEXT: s_lshr_b32 s25, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s9, s19, 24 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 
-; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s72, s23, 24 +; SI-NEXT: s_lshr_b32 s74, s23, 16 +; SI-NEXT: s_lshr_b32 s75, s23, 8 +; SI-NEXT: s_lshr_b32 s61, s21, 24 +; SI-NEXT: s_lshr_b32 s63, s21, 16 +; SI-NEXT: s_lshr_b32 s73, s21, 8 +; SI-NEXT: s_lshr_b32 s58, s19, 24 +; SI-NEXT: s_lshr_b32 s60, s19, 16 +; SI-NEXT: s_lshr_b32 s62, s19, 8 +; SI-NEXT: s_lshr_b32 s56, s17, 24 +; SI-NEXT: s_lshr_b32 s57, s17, 16 +; SI-NEXT: s_lshr_b32 s59, s17, 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB49_4 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v4, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v20, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v28, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v24, s22, 1.0 -; SI-NEXT: v_alignbit_b32 v27, v28, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v24, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v24, 8 -; SI-NEXT: v_alignbit_b32 v19, v20, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v16, 16 -; SI-NEXT: v_alignbit_b32 v17, v20, v16, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v28 
-; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 -; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v20 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v12 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v4 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v39, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v38, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v49, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v48, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v35, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v34, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v37, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v36, s20, 1.0 +; SI-NEXT: v_lshr_b64 v[27:28], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[24:25], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[0:1], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[48:49], 8 +; SI-NEXT: v_lshr_b64 v[19:20], v[36:37], 24 +; SI-NEXT: v_lshr_b64 v[32:33], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[36:37], 8 +; SI-NEXT: v_lshr_b64 v[11:12], v[34:35], 24 +; SI-NEXT: v_lshr_b64 v[28:29], v[34:35], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[34:35], 8 +; SI-NEXT: v_lshr_b64 v[3:4], v[38:39], 24 +; SI-NEXT: v_lshr_b64 v[1:2], v[38:39], 8 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v49 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v49 +; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v37 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v37 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v35 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v35 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v35 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v39 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v39 +; SI-NEXT: s_branch .LBB49_5 ; SI-NEXT: .LBB49_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; 
implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr24 -; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: s_branch .LBB49_2 ; SI-NEXT: .LBB49_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v16, s20 -; SI-NEXT: v_mov_b32_e32 v20, s21 -; SI-NEXT: v_mov_b32_e32 v24, s22 -; SI-NEXT: v_mov_b32_e32 v28, s23 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 -; SI-NEXT: v_mov_b32_e32 v13, s11 -; SI-NEXT: v_mov_b32_e32 v14, s10 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v21, s14 -; SI-NEXT: 
v_mov_b32_e32 v22, s13 -; SI-NEXT: v_mov_b32_e32 v23, s12 -; SI-NEXT: v_mov_b32_e32 v29, s25 -; SI-NEXT: v_mov_b32_e32 v30, s24 -; SI-NEXT: v_mov_b32_e32 v31, s15 +; SI-NEXT: v_mov_b32_e32 v38, s16 +; SI-NEXT: v_mov_b32_e32 v39, s17 +; SI-NEXT: v_mov_b32_e32 v34, s18 +; SI-NEXT: v_mov_b32_e32 v35, s19 +; SI-NEXT: v_mov_b32_e32 v36, s20 +; SI-NEXT: v_mov_b32_e32 v37, s21 +; SI-NEXT: v_mov_b32_e32 v48, s22 +; SI-NEXT: v_mov_b32_e32 v49, s23 +; SI-NEXT: v_mov_b32_e32 v5, s59 +; SI-NEXT: v_mov_b32_e32 v6, s57 +; SI-NEXT: v_mov_b32_e32 v7, s56 +; SI-NEXT: v_mov_b32_e32 v13, s62 +; SI-NEXT: v_mov_b32_e32 v14, s60 +; SI-NEXT: v_mov_b32_e32 v15, s58 +; SI-NEXT: v_mov_b32_e32 v21, s73 +; SI-NEXT: v_mov_b32_e32 v22, s63 +; SI-NEXT: v_mov_b32_e32 v23, s61 +; SI-NEXT: v_mov_b32_e32 v29, s75 +; SI-NEXT: v_mov_b32_e32 v30, s74 +; SI-NEXT: v_mov_b32_e32 v31, s72 +; SI-NEXT: v_mov_b32_e32 v27, s40 +; SI-NEXT: v_mov_b32_e32 v24, s42 +; SI-NEXT: v_mov_b32_e32 v25, s44 +; SI-NEXT: v_mov_b32_e32 v19, s24 +; SI-NEXT: v_mov_b32_e32 v32, s26 +; SI-NEXT: v_mov_b32_e32 v17, s28 +; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v28, s12 +; SI-NEXT: v_mov_b32_e32 v9, s14 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: .LBB49_5: ; %end +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: v_mov_b32_e32 v0, v38 +; SI-NEXT: v_mov_b32_e32 v4, v39 +; SI-NEXT: v_mov_b32_e32 v10, v28 +; SI-NEXT: v_mov_b32_e32 v8, v34 +; SI-NEXT: v_mov_b32_e32 v12, v35 +; SI-NEXT: v_mov_b32_e32 v18, v32 +; SI-NEXT: v_mov_b32_e32 v16, v36 +; SI-NEXT: v_mov_b32_e32 v20, v37 +; SI-NEXT: v_mov_b32_e32 v26, v24 +; SI-NEXT: v_mov_b32_e32 v24, v48 +; SI-NEXT: v_mov_b32_e32 v28, v49 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v8f32_to_v32i8_scalar: @@ -15064,63 +15094,59 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB57_4 ; SI-NEXT: ; %bb.1: ; %cmp.false 
-; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s23, 16 +; SI-NEXT: s_lshr_b32 s15, s21, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB57_3 ; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 ; SI-NEXT: s_add_u32 s22, s22, 3 ; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s14, s23, 16 +; SI-NEXT: s_lshr_b32 s15, s21, 16 +; SI-NEXT: s_lshr_b32 s24, s19, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 
16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: .LBB57_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s10 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v3, s25 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s8 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s8 +; SI-NEXT: v_mov_b32_e32 v7, s24 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s6 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s7 +; SI-NEXT: v_mov_b32_e32 v11, s15 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s4 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v15, s14 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr24 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: s_branch .LBB57_2 ; ; VI-LABEL: bitcast_v4i64_to_v16i16_scalar: @@ -18815,119 +18841,123 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB69_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: 
v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s23, 24 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 8 -; SI-NEXT: s_lshr_b32 s8, s21, 24 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s12, s21, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s13, s19, 16 -; SI-NEXT: s_lshr_b32 s14, s19, 8 -; SI-NEXT: s_lshr_b32 s15, s17, 24 -; SI-NEXT: s_lshr_b32 s24, s17, 16 -; SI-NEXT: s_lshr_b32 s25, s17, 8 +; SI-NEXT: s_lshr_b32 s56, s23, 24 +; SI-NEXT: s_lshr_b32 s57, s23, 16 +; SI-NEXT: s_lshr_b32 s58, s23, 8 +; SI-NEXT: s_lshr_b32 s59, s21, 24 +; SI-NEXT: s_lshr_b32 s60, s21, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 8 +; SI-NEXT: s_lshr_b32 s62, s19, 24 +; SI-NEXT: s_lshr_b32 s63, s19, 16 +; SI-NEXT: s_lshr_b32 s72, s19, 8 +; SI-NEXT: s_lshr_b32 s73, s17, 24 +; SI-NEXT: s_lshr_b32 s74, s17, 16 +; SI-NEXT: s_lshr_b32 s75, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB69_3 ; SI-NEXT: .LBB69_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 ; SI-NEXT: s_add_u32 s22, s22, 3 ; SI-NEXT: 
s_addc_u32 s23, s23, 0 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s23, 24 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 8 -; SI-NEXT: s_lshr_b32 s8, s21, 24 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s12, s21, 8 -; SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s13, s19, 16 -; SI-NEXT: s_lshr_b32 s14, s19, 8 -; SI-NEXT: s_lshr_b32 s15, s17, 24 -; SI-NEXT: s_lshr_b32 s24, s17, 16 -; SI-NEXT: s_lshr_b32 s25, s17, 8 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s56, s23, 24 +; SI-NEXT: s_lshr_b32 s57, s23, 16 +; SI-NEXT: s_lshr_b32 s58, s23, 8 +; SI-NEXT: s_lshr_b32 s59, s21, 24 +; SI-NEXT: s_lshr_b32 s60, s21, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 8 +; SI-NEXT: s_lshr_b32 s62, s19, 24 +; SI-NEXT: s_lshr_b32 s63, s19, 16 +; SI-NEXT: s_lshr_b32 s72, s19, 8 +; SI-NEXT: s_lshr_b32 s73, s17, 24 +; SI-NEXT: s_lshr_b32 s74, s17, 16 +; SI-NEXT: s_lshr_b32 s75, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], 
s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8 ; SI-NEXT: .LBB69_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s44 +; SI-NEXT: v_mov_b32_e32 v2, s42 +; SI-NEXT: v_mov_b32_e32 v3, s40 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s25 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s15 +; SI-NEXT: v_mov_b32_e32 v5, s75 +; SI-NEXT: v_mov_b32_e32 v6, s74 +; SI-NEXT: v_mov_b32_e32 v7, s73 ; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s28 +; SI-NEXT: v_mov_b32_e32 v10, s26 +; SI-NEXT: v_mov_b32_e32 v11, s24 ; SI-NEXT: v_mov_b32_e32 v12, s19 -; SI-NEXT: v_mov_b32_e32 v13, s14 -; SI-NEXT: v_mov_b32_e32 v14, s13 -; SI-NEXT: v_mov_b32_e32 v15, s11 +; SI-NEXT: v_mov_b32_e32 v13, s72 +; SI-NEXT: v_mov_b32_e32 v14, s63 +; SI-NEXT: v_mov_b32_e32 v15, s62 ; SI-NEXT: v_mov_b32_e32 v16, s20 +; SI-NEXT: v_mov_b32_e32 v17, s14 +; SI-NEXT: v_mov_b32_e32 v18, s12 +; SI-NEXT: v_mov_b32_e32 v19, s10 ; SI-NEXT: v_mov_b32_e32 v20, s21 -; SI-NEXT: v_mov_b32_e32 v21, s12 -; SI-NEXT: v_mov_b32_e32 v22, s10 -; SI-NEXT: v_mov_b32_e32 v23, s8 +; SI-NEXT: v_mov_b32_e32 v21, s61 +; SI-NEXT: v_mov_b32_e32 v22, s60 +; SI-NEXT: v_mov_b32_e32 v23, s59 ; SI-NEXT: v_mov_b32_e32 v24, s22 +; SI-NEXT: v_mov_b32_e32 v25, s8 +; SI-NEXT: v_mov_b32_e32 v26, s6 +; SI-NEXT: v_mov_b32_e32 v27, s4 ; SI-NEXT: v_mov_b32_e32 v28, s23 -; SI-NEXT: v_mov_b32_e32 v29, s9 -; SI-NEXT: v_mov_b32_e32 v30, s7 -; SI-NEXT: v_mov_b32_e32 v31, s6 +; SI-NEXT: v_mov_b32_e32 v29, s58 +; SI-NEXT: v_mov_b32_e32 v30, s57 +; SI-NEXT: v_mov_b32_e32 v31, s56 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB69_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; 
implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr24 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr62 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr59 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: s_branch .LBB69_2 ; ; VI-LABEL: bitcast_v4i64_to_v32i8_scalar: @@ -21155,56 +21185,56 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB73_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 
16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s25, s23, 16 +; SI-NEXT: s_lshr_b32 s24, s21, 16 +; SI-NEXT: s_lshr_b32 s15, s19, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB73_4 ; SI-NEXT: .LBB73_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[22:23], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[20:21], s[18:19], 1.0 ; SI-NEXT: v_add_f64 v[16:17], s[22:23], 1.0 ; SI-NEXT: v_add_f64 v[18:19], s[20:21], 1.0 -; SI-NEXT: v_alignbit_b32 v13, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v9, v19, v18, 16 -; SI-NEXT: v_alignbit_b32 v5, v21, v20, 16 -; SI-NEXT: v_alignbit_b32 v1, v23, v22, 16 +; SI-NEXT: v_add_f64 v[20:21], s[18:19], 1.0 +; SI-NEXT: v_add_f64 v[22:23], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[13:14], v[16:17], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[18:19], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[22:23], 16 ; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v17 ; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v19 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v21 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v23 ; SI-NEXT: s_branch .LBB73_5 ; SI-NEXT: .LBB73_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr25 ; SI-NEXT: s_branch .LBB73_2 ; SI-NEXT: .LBB73_4: -; SI-NEXT: v_mov_b32_e32 v23, s17 -; SI-NEXT: v_mov_b32_e32 v21, s19 -; 
SI-NEXT: v_mov_b32_e32 v19, s21 ; SI-NEXT: v_mov_b32_e32 v17, s23 -; SI-NEXT: v_mov_b32_e32 v16, s22 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v20, s18 +; SI-NEXT: v_mov_b32_e32 v19, s21 +; SI-NEXT: v_mov_b32_e32 v21, s19 +; SI-NEXT: v_mov_b32_e32 v23, s17 ; SI-NEXT: v_mov_b32_e32 v22, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v20, s18 +; SI-NEXT: v_mov_b32_e32 v18, s20 +; SI-NEXT: v_mov_b32_e32 v16, s22 +; SI-NEXT: v_mov_b32_e32 v15, s25 +; SI-NEXT: v_mov_b32_e32 v11, s24 +; SI-NEXT: v_mov_b32_e32 v7, s15 +; SI-NEXT: v_mov_b32_e32 v3, s14 +; SI-NEXT: v_mov_b32_e32 v1, s10 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: v_mov_b32_e32 v9, s6 +; SI-NEXT: v_mov_b32_e32 v13, s4 ; SI-NEXT: .LBB73_5: ; %end ; SI-NEXT: v_mov_b32_e32 v0, v22 ; SI-NEXT: v_mov_b32_e32 v2, v23 @@ -24819,121 +24849,133 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB85_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v27, s23, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_alignbit_b32 v32, s23, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v19, s21, v0, 24 -; SI-NEXT: v_alignbit_b32 v18, s21, v0, 16 -; SI-NEXT: v_alignbit_b32 v33, s21, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s19, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v0, 16 -; SI-NEXT: v_alignbit_b32 v34, s19, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v35, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s25, s23, 24 -; SI-NEXT: s_lshr_b32 s24, s23, 16 -; SI-NEXT: s_lshr_b32 s15, s23, 8 -; SI-NEXT: s_lshr_b32 s14, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s12, s21, 8 -; 
SI-NEXT: s_lshr_b32 s11, s19, 24 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 8 -; SI-NEXT: s_lshr_b32 s8, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s74, s23, 16 +; SI-NEXT: s_lshr_b32 s73, s23, 8 +; SI-NEXT: s_lshr_b32 s72, s21, 24 +; SI-NEXT: s_lshr_b32 s63, s21, 16 +; SI-NEXT: s_lshr_b32 s62, s21, 8 +; SI-NEXT: s_lshr_b32 s61, s19, 24 +; SI-NEXT: s_lshr_b32 s60, s19, 16 +; SI-NEXT: s_lshr_b32 s59, s19, 8 +; SI-NEXT: s_lshr_b32 s58, s17, 24 +; SI-NEXT: s_lshr_b32 s57, s17, 16 +; SI-NEXT: s_lshr_b32 s56, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB85_4 ; SI-NEXT: .LBB85_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[8:9], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[24:25], s[22:23], 1.0 -; SI-NEXT: v_add_f64 v[16:17], s[20:21], 1.0 -; SI-NEXT: v_alignbit_b32 v27, v25, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v25, v24, 16 -; SI-NEXT: v_alignbit_b32 v32, v25, v24, 8 -; SI-NEXT: v_alignbit_b32 v19, v17, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v33, v17, v16, 8 -; SI-NEXT: v_alignbit_b32 v11, v9, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v34, v9, v8, 8 -; SI-NEXT: v_alignbit_b32 v3, v1, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v1, v0, 16 -; SI-NEXT: v_alignbit_b32 v35, v1, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v25 -; 
SI-NEXT: v_lshrrev_b32_e32 v30, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v25 -; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v17 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v9 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v1 +; SI-NEXT: v_add_f64 v[50:51], s[22:23], 1.0 +; SI-NEXT: v_add_f64 v[37:38], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[48:49], s[16:17], 1.0 +; SI-NEXT: v_add_f64 v[35:36], s[18:19], 1.0 +; SI-NEXT: v_lshr_b64 v[24:25], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[37:38], 16 +; SI-NEXT: v_lshr_b64 v[0:1], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[50:51], 24 +; SI-NEXT: v_lshr_b64 v[25:26], v[50:51], 8 +; SI-NEXT: v_lshr_b64 v[19:20], v[37:38], 24 +; SI-NEXT: v_lshr_b64 v[17:18], v[37:38], 8 +; SI-NEXT: v_lshr_b64 v[11:12], v[35:36], 24 +; SI-NEXT: v_lshr_b64 v[33:34], v[35:36], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[35:36], 8 +; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v51 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v51 +; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v38 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v38 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v38 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v36 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v36 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v49 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49 ; SI-NEXT: s_branch .LBB85_5 ; SI-NEXT: .LBB85_3: -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: 
; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr24 -; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: s_branch .LBB85_2 ; SI-NEXT: .LBB85_4: -; SI-NEXT: v_mov_b32_e32 v1, s17 -; SI-NEXT: v_mov_b32_e32 v9, s19 -; SI-NEXT: v_mov_b32_e32 v17, s21 -; SI-NEXT: v_mov_b32_e32 v25, s23 -; SI-NEXT: v_mov_b32_e32 v24, s22 -; SI-NEXT: v_mov_b32_e32 v16, s20 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v7, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v5, s6 -; SI-NEXT: v_mov_b32_e32 v15, s11 -; SI-NEXT: v_mov_b32_e32 v14, s10 -; SI-NEXT: v_mov_b32_e32 v13, s9 -; SI-NEXT: v_mov_b32_e32 v23, s14 -; SI-NEXT: 
v_mov_b32_e32 v22, s13 -; SI-NEXT: v_mov_b32_e32 v21, s12 -; SI-NEXT: v_mov_b32_e32 v31, s25 -; SI-NEXT: v_mov_b32_e32 v30, s24 -; SI-NEXT: v_mov_b32_e32 v29, s15 +; SI-NEXT: v_mov_b32_e32 v51, s23 +; SI-NEXT: v_mov_b32_e32 v38, s21 +; SI-NEXT: v_mov_b32_e32 v36, s19 +; SI-NEXT: v_mov_b32_e32 v49, s17 +; SI-NEXT: v_mov_b32_e32 v48, s16 +; SI-NEXT: v_mov_b32_e32 v35, s18 +; SI-NEXT: v_mov_b32_e32 v37, s20 +; SI-NEXT: v_mov_b32_e32 v50, s22 +; SI-NEXT: v_mov_b32_e32 v31, s75 +; SI-NEXT: v_mov_b32_e32 v30, s74 +; SI-NEXT: v_mov_b32_e32 v29, s73 +; SI-NEXT: v_mov_b32_e32 v23, s72 +; SI-NEXT: v_mov_b32_e32 v22, s63 +; SI-NEXT: v_mov_b32_e32 v21, s62 +; SI-NEXT: v_mov_b32_e32 v15, s61 +; SI-NEXT: v_mov_b32_e32 v14, s60 +; SI-NEXT: v_mov_b32_e32 v13, s59 +; SI-NEXT: v_mov_b32_e32 v7, s58 +; SI-NEXT: v_mov_b32_e32 v6, s57 +; SI-NEXT: v_mov_b32_e32 v5, s56 +; SI-NEXT: v_mov_b32_e32 v1, s44 +; SI-NEXT: v_mov_b32_e32 v0, s42 +; SI-NEXT: v_mov_b32_e32 v3, s40 +; SI-NEXT: v_mov_b32_e32 v9, s28 +; SI-NEXT: v_mov_b32_e32 v33, s26 +; SI-NEXT: v_mov_b32_e32 v11, s24 +; SI-NEXT: v_mov_b32_e32 v17, s14 +; SI-NEXT: v_mov_b32_e32 v32, s12 +; SI-NEXT: v_mov_b32_e32 v19, s10 +; SI-NEXT: v_mov_b32_e32 v25, s8 +; SI-NEXT: v_mov_b32_e32 v24, s6 +; SI-NEXT: v_mov_b32_e32 v27, s4 ; SI-NEXT: .LBB85_5: ; %end -; SI-NEXT: v_mov_b32_e32 v4, v1 -; SI-NEXT: v_mov_b32_e32 v12, v9 -; SI-NEXT: v_mov_b32_e32 v20, v17 -; SI-NEXT: v_mov_b32_e32 v28, v25 -; SI-NEXT: v_mov_b32_e32 v1, v35 -; SI-NEXT: v_mov_b32_e32 v9, v34 -; SI-NEXT: v_mov_b32_e32 v17, v33 -; SI-NEXT: v_mov_b32_e32 v25, v32 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: v_mov_b32_e32 v0, v48 +; SI-NEXT: v_mov_b32_e32 v4, v49 +; SI-NEXT: v_mov_b32_e32 v10, v33 +; SI-NEXT: v_mov_b32_e32 v8, v35 +; SI-NEXT: v_mov_b32_e32 v12, v36 +; SI-NEXT: v_mov_b32_e32 v18, v32 +; SI-NEXT: v_mov_b32_e32 v16, v37 +; SI-NEXT: v_mov_b32_e32 v20, v38 +; SI-NEXT: v_mov_b32_e32 v26, v24 +; SI-NEXT: v_mov_b32_e32 v24, v50 +; SI-NEXT: v_mov_b32_e32 v28, v51 ; 
SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v4f64_to_v32i8_scalar: @@ -27681,26 +27723,24 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i ; SI-LABEL: bitcast_v16f16_to_v16i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v15, v1 -; SI-NEXT: v_mov_b32_e32 v14, v0 -; SI-NEXT: v_mov_b32_e32 v16, v2 +; SI-NEXT: v_mov_b32_e32 v5, v0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v18, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s25 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB91_4 ; SI-NEXT: ; %bb.1: ; %cmp.false @@ -27713,16 +27753,12 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v18 ; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: 
v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 @@ -27730,51 +27766,59 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_or_b32_e32 v14, v14, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v11 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: 
v_lshlrev_b32_e32 v16, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v10, v10, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v14, v14, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v7 ; SI-NEXT: v_or_b32_e32 v6, v6, v16 ; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v2, v2, v16 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v2, v2, v16 +; SI-NEXT: v_lshr_b64 v[18:19], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[19:20], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[16:17], v[13:14], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 ; SI-NEXT: .LBB91_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v18 +; SI-NEXT: v_mov_b32_e32 v5, v21 +; SI-NEXT: v_mov_b32_e32 v9, v19 +; SI-NEXT: v_mov_b32_e32 v13, v16 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB91_4: ; SI-NEXT: s_branch .LBB91_2 @@ -29151,115 +29195,119 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mul_f32_e64 v31, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v30, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v17, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v20, 1.0, s18 ; SI-NEXT: v_mul_f32_e64 v16, 1.0, s19 ; SI-NEXT: v_mul_f32_e64 v29, 1.0, s20 ; SI-NEXT: v_mul_f32_e64 v28, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v19, 1.0, s22 -; SI-NEXT: 
v_mul_f32_e64 v18, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v22, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v5, 1.0, s23 ; SI-NEXT: v_mul_f32_e64 v27, 1.0, s24 ; SI-NEXT: v_mul_f32_e64 v26, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v21, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v20, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v23, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s27 ; SI-NEXT: v_mul_f32_e64 v25, 1.0, s28 ; SI-NEXT: v_mul_f32_e64 v24, 1.0, s29 -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v0 -; SI-NEXT: v_mul_f32_e32 v22, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v13, 1.0, v0 +; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1 ; SI-NEXT: s_cbranch_scc0 .LBB95_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v31 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v20 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v16 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v29 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 ; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v27 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 ; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v24 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v13 ; SI-NEXT: s_cbranch_execnz .LBB95_3 ; SI-NEXT: .LBB95_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v30 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v30 ; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v31 -; 
SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v12 ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v28 ; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v29 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v26 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v26 ; SI-NEXT: v_alignbit_b32 v4, v4, v2, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v27 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v5 -; SI-NEXT: v_alignbit_b32 v8, v6, v2, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v24 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v25 ; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_alignbit_b32 v12, v7, v2, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v22 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v23 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_alignbit_b32 v8, v7, v2, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v25 +; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v24 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v7 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v20 -; SI-NEXT: v_alignbit_b32 v14, v15, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v21 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v2 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v13 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 +; SI-NEXT: 
v_lshrrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v23 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v6 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v18 -; SI-NEXT: v_alignbit_b32 v10, v11, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v19 -; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v5 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_alignbit_b32 v10, v11, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v22 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v16 -; SI-NEXT: v_alignbit_b32 v6, v7, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v17 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_alignbit_b32 v14, v15, v7, 16 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v16 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 +; SI-NEXT: v_alignbit_b32 v6, v7, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v20 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; SI-NEXT: v_alignbit_b32 v2, v3, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v12 +; SI-NEXT: v_lshr_b64 v[17:18], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[18:19], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[9:10], 16 +; SI-NEXT: v_lshr_b64 
v[19:20], v[13:14], 16 +; SI-NEXT: v_alignbit_b32 v12, v24, v25, 16 ; SI-NEXT: .LBB95_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v17 +; SI-NEXT: v_mov_b32_e32 v5, v18 +; SI-NEXT: v_mov_b32_e32 v9, v21 +; SI-NEXT: v_mov_b32_e32 v13, v19 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB95_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: s_branch .LBB95_2 ; ; VI-LABEL: bitcast_v16bf16_to_v16i16_scalar: @@ -30446,80 +30494,83 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32 ; SI-LABEL: bitcast_v16i16_to_v32i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v4, v1 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 +; SI-NEXT: v_readfirstlane_b32 s78, v1 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_readfirstlane_b32 s79, v0 ; SI-NEXT: s_cbranch_scc0 .LBB97_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s11, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s12, s4, s5 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 
16 -; SI-NEXT: s_or_b32 s9, s4, s5 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: v_mov_b32_e32 v6, s6 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: v_alignbit_b32 v11, s9, v6, 24 -; SI-NEXT: v_alignbit_b32 v10, s9, v6, 16 -; SI-NEXT: v_alignbit_b32 v9, s9, v6, 8 -; SI-NEXT: s_or_b32 s10, s4, s5 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_alignbit_b32 v19, s10, v6, 24 -; SI-NEXT: v_alignbit_b32 v18, s10, v6, 16 -; SI-NEXT: v_alignbit_b32 v17, s10, v6, 8 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v0 -; SI-NEXT: v_mov_b32_e32 v1, s11 -; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: v_or_b32_e32 v28, v6, v5 -; SI-NEXT: v_alignbit_b32 v3, s12, v1, 24 -; SI-NEXT: v_alignbit_b32 v2, s12, v1, 16 -; SI-NEXT: v_alignbit_b32 v1, s12, v1, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, s8, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, s8, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, s8, 8 -; SI-NEXT: s_lshr_b32 s44, s12, 8 -; SI-NEXT: s_lshr_b32 s14, s9, 8 -; SI-NEXT: s_lshr_b32 s41, s10, 8 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 -; SI-NEXT: s_and_b32 s45, s19, 0xffff -; SI-NEXT: s_and_b32 s15, s23, 0xffff -; SI-NEXT: s_and_b32 s42, s27, 0xffff -; SI-NEXT: v_and_b32_e32 v30, 0xffff, v4 -; SI-NEXT: s_bfe_u32 s13, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s40, s23, 0x80008 -; SI-NEXT: s_bfe_u32 s43, s27, 0x80008 -; SI-NEXT: v_bfe_u32 v31, v4, 8, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s6, s20, 0xffff +; SI-NEXT: s_lshl_b32 s7, s21, 16 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xffff +; SI-NEXT: s_lshl_b32 s8, s23, 16 +; SI-NEXT: s_or_b32 s7, s7, s8 +; SI-NEXT: s_and_b32 s8, s24, 0xffff +; SI-NEXT: s_lshl_b32 s9, s25, 16 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_and_b32 s9, s26, 0xffff +; 
SI-NEXT: s_lshl_b32 s10, s27, 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 24 +; SI-NEXT: s_or_b32 s9, s9, s10 +; SI-NEXT: s_and_b32 s10, s28, 0xffff +; SI-NEXT: s_lshl_b32 s11, s29, 16 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s79, 0xffff +; SI-NEXT: s_lshl_b32 s13, s78, 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: s_lshr_b32 s13, s5, 8 +; SI-NEXT: s_lshr_b32 s41, s7, 8 +; SI-NEXT: s_lshr_b32 s47, s9, 8 +; SI-NEXT: s_lshr_b32 s88, s11, 8 +; SI-NEXT: s_and_b32 s15, s19, 0xffff +; SI-NEXT: s_and_b32 s45, s23, 0xffff +; SI-NEXT: s_and_b32 s59, s27, 0xffff +; SI-NEXT: s_and_b32 s90, s78, 0xffff +; SI-NEXT: s_bfe_u32 s43, s19, 0x80008 +; SI-NEXT: s_bfe_u32 s57, s23, 0x80008 +; SI-NEXT: s_bfe_u32 s89, s27, 0x80008 +; SI-NEXT: s_bfe_u32 s91, s78, 0x80008 +; SI-NEXT: s_lshr_b64 s[60:61], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[10:11], 8 ; SI-NEXT: s_cbranch_execnz .LBB97_3 ; SI-NEXT: .LBB97_2: ; %cmp.true ; SI-NEXT: s_add_i32 s28, s28, 3 ; SI-NEXT: s_and_b32 s4, s28, 0xffff ; SI-NEXT: s_lshl_b32 s5, s29, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_add_i32 s79, s79, 3 +; SI-NEXT: s_add_i32 s10, s4, 0x30000 +; SI-NEXT: s_and_b32 s4, s79, 0xffff +; SI-NEXT: s_lshl_b32 s5, s78, 16 +; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 +; SI-NEXT: s_add_i32 s11, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s24, 0xffff ; SI-NEXT: s_lshl_b32 s5, s25, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 +; SI-NEXT: s_add_i32 s8, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, 
s26, 0xffff ; SI-NEXT: s_lshl_b32 s5, s27, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s10, s4, 0x30000 +; SI-NEXT: s_add_i32 s9, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s20, 0xffff ; SI-NEXT: s_lshl_b32 s5, s21, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 @@ -30529,99 +30580,103 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32 ; SI-NEXT: s_lshl_b32 s5, s23, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x30000 +; SI-NEXT: s_add_i32 s7, s4, 0x30000 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: s_add_i32 s11, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: v_or_b32_e32 v0, v5, v0 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_mov_b32_e32 v4, s6 -; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0 -; SI-NEXT: v_mov_b32_e32 v0, s8 -; SI-NEXT: s_add_i32 s12, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v1, s11 -; SI-NEXT: v_alignbit_b32 v11, s9, v4, 24 -; SI-NEXT: v_alignbit_b32 v10, s9, v4, 16 -; SI-NEXT: v_alignbit_b32 v9, s9, v4, 8 -; SI-NEXT: v_mov_b32_e32 v4, s7 -; SI-NEXT: v_alignbit_b32 v3, s12, v1, 24 -; SI-NEXT: v_alignbit_b32 v2, s12, v1, 16 -; SI-NEXT: v_alignbit_b32 v1, s12, v1, 8 -; SI-NEXT: v_alignbit_b32 v19, s10, v4, 24 -; SI-NEXT: v_alignbit_b32 v18, s10, v4, 16 -; SI-NEXT: v_alignbit_b32 v17, s10, v4, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, v0, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v0, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v0, 8 -; SI-NEXT: s_lshr_b32 s13, s12, 24 -; SI-NEXT: s_lshr_b32 s45, s12, 16 -; SI-NEXT: s_lshr_b32 s44, s12, 8 -; SI-NEXT: s_lshr_b32 s40, s9, 24 -; SI-NEXT: s_lshr_b32 s15, s9, 16 -; SI-NEXT: s_lshr_b32 s14, s9, 8 -; SI-NEXT: s_lshr_b32 s43, s10, 24 -; SI-NEXT: s_lshr_b32 s42, s10, 16 -; SI-NEXT: 
s_lshr_b32 s41, s10, 8 -; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s12, s19, 16 +; SI-NEXT: s_or_b32 s5, s12, s5 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[10:11], 8 +; SI-NEXT: s_lshr_b32 s43, s5, 24 +; SI-NEXT: s_lshr_b32 s15, s5, 16 +; SI-NEXT: s_lshr_b32 s13, s5, 8 +; SI-NEXT: s_lshr_b32 s57, s7, 24 +; SI-NEXT: s_lshr_b32 s45, s7, 16 +; SI-NEXT: s_lshr_b32 s41, s7, 8 +; SI-NEXT: s_lshr_b32 s89, s9, 24 +; SI-NEXT: s_lshr_b32 s59, s9, 16 +; SI-NEXT: s_lshr_b32 s47, s9, 8 +; SI-NEXT: s_lshr_b32 s91, s11, 24 +; SI-NEXT: s_lshr_b32 s90, s11, 16 +; SI-NEXT: s_lshr_b32 s88, s11, 8 ; SI-NEXT: .LBB97_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s11 -; SI-NEXT: v_mov_b32_e32 v4, s12 -; SI-NEXT: v_mov_b32_e32 v5, s44 -; SI-NEXT: v_mov_b32_e32 v6, s45 -; SI-NEXT: v_mov_b32_e32 v7, s13 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s40 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v4, s5 +; SI-NEXT: v_mov_b32_e32 v5, s13 +; SI-NEXT: v_mov_b32_e32 v6, s15 +; SI-NEXT: v_mov_b32_e32 v7, s43 ; SI-NEXT: v_mov_b32_e32 v8, s6 -; SI-NEXT: v_mov_b32_e32 v12, s9 -; SI-NEXT: v_mov_b32_e32 v13, s14 -; SI-NEXT: v_mov_b32_e32 v14, s15 -; SI-NEXT: v_mov_b32_e32 v15, s40 -; SI-NEXT: v_mov_b32_e32 v16, s7 -; SI-NEXT: v_mov_b32_e32 v20, s10 -; 
SI-NEXT: v_mov_b32_e32 v21, s41 -; SI-NEXT: v_mov_b32_e32 v22, s42 -; SI-NEXT: v_mov_b32_e32 v23, s43 -; SI-NEXT: v_mov_b32_e32 v24, s8 +; SI-NEXT: v_mov_b32_e32 v9, s46 +; SI-NEXT: v_mov_b32_e32 v10, s44 +; SI-NEXT: v_mov_b32_e32 v11, s42 +; SI-NEXT: v_mov_b32_e32 v12, s7 +; SI-NEXT: v_mov_b32_e32 v13, s41 +; SI-NEXT: v_mov_b32_e32 v14, s45 +; SI-NEXT: v_mov_b32_e32 v15, s57 +; SI-NEXT: v_mov_b32_e32 v16, s8 +; SI-NEXT: v_mov_b32_e32 v17, s60 +; SI-NEXT: v_mov_b32_e32 v18, s58 +; SI-NEXT: v_mov_b32_e32 v19, s56 +; SI-NEXT: v_mov_b32_e32 v20, s9 +; SI-NEXT: v_mov_b32_e32 v21, s47 +; SI-NEXT: v_mov_b32_e32 v22, s59 +; SI-NEXT: v_mov_b32_e32 v23, s89 +; SI-NEXT: v_mov_b32_e32 v24, s10 +; SI-NEXT: v_mov_b32_e32 v25, s74 +; SI-NEXT: v_mov_b32_e32 v26, s62 +; SI-NEXT: v_mov_b32_e32 v27, s72 +; SI-NEXT: v_mov_b32_e32 v28, s11 +; SI-NEXT: v_mov_b32_e32 v29, s88 +; SI-NEXT: v_mov_b32_e32 v30, s90 +; SI-NEXT: v_mov_b32_e32 v31, s91 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB97_4: -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $sgpr42 ; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; 
implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr41 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr47 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: s_branch .LBB97_2 ; ; VI-LABEL: bitcast_v16i16_to_v32i8_scalar: @@ -32019,234 +32074,229 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18 -; SI-NEXT: v_mov_b32_e32 v19, v14 -; SI-NEXT: v_mov_b32_e32 v20, v12 -; SI-NEXT: v_readfirstlane_b32 s13, v11 -; SI-NEXT: v_readfirstlane_b32 s14, v10 -; SI-NEXT: v_readfirstlane_b32 s9, v3 -; SI-NEXT: v_readfirstlane_b32 s10, v2 -; SI-NEXT: v_readfirstlane_b32 s7, v1 -; SI-NEXT: v_readfirstlane_b32 s6, v0 +; SI-NEXT: v_mov_b32_e32 v22, v14 +; SI-NEXT: v_mov_b32_e32 v21, v10 +; SI-NEXT: v_readfirstlane_b32 s43, v1 +; SI-NEXT: v_readfirstlane_b32 s42, v0 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v7 -; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v3 ; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 -; 
SI-NEXT: v_lshlrev_b32_e32 v17, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v17 ; SI-NEXT: s_cbranch_scc0 .LBB99_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: s_or_b32 s11, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 -; SI-NEXT: s_lshl_b32 s8, s19, 24 -; SI-NEXT: s_or_b32 s4, s8, s4 -; SI-NEXT: s_and_b32 s8, s28, 0xff -; SI-NEXT: s_lshl_b32 s12, s29, 8 -; SI-NEXT: s_or_b32 s8, s8, s12 -; SI-NEXT: s_and_b32 s12, s6, 0xff -; SI-NEXT: s_lshl_b32 s12, s12, 16 -; SI-NEXT: s_lshl_b32 s15, s7, 24 -; SI-NEXT: s_or_b32 s41, s15, s12 -; SI-NEXT: s_and_b32 s12, s26, 0xff -; SI-NEXT: s_lshl_b32 s12, s12, 16 -; SI-NEXT: s_lshl_b32 s15, s27, 24 -; SI-NEXT: s_or_b32 s12, s15, s12 -; SI-NEXT: s_and_b32 s15, s16, 0xff -; SI-NEXT: s_lshl_b32 s40, s17, 8 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v6 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v8 -; SI-NEXT: s_or_b32 s15, s15, s40 -; SI-NEXT: v_or_b32_e32 v9, v9, v2 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_and_b32 s15, s15, 0xffff -; SI-NEXT: v_mov_b32_e32 v1, s4 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v11, v0, v10 -; SI-NEXT: s_or_b32 s15, s15, s4 +; SI-NEXT: s_or_b32 s12, s6, s5 +; SI-NEXT: s_or_b32 s6, s4, s12 ; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s40, s25, 8 -; SI-NEXT: v_or_b32_e32 v10, v9, v11 +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; 
SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s26, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s27, 24 +; SI-NEXT: s_or_b32 s14, s7, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_or_b32 s13, s5, s7 +; SI-NEXT: s_lshr_b64 s[8:9], s[12:13], 16 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v12 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v8 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s9, s29, 8 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v17, 0xff, v16 +; SI-NEXT: s_or_b32 s5, s5, s9 +; SI-NEXT: s_and_b32 s9, s42, 0xff +; SI-NEXT: v_or_b32_e32 v9, v9, v23 +; SI-NEXT: v_or_b32_e32 v13, v24, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v15, v0, v14 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s43, 24 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v9 ; SI-NEXT: v_and_b32_e32 v9, 0xff, v4 -; SI-NEXT: s_or_b32 s4, s4, s40 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_mov_b32_e32 v5, s12 -; SI-NEXT: v_or_b32_e32 v12, v3, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v19 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v16 -; SI-NEXT: s_or_b32 s12, s4, s12 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s40, s9, 8 -; SI-NEXT: v_or_b32_e32 v9, v9, v21 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: s_or_b32 s4, s4, s40 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v15, v7, v13 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v14, v9, v15 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v20 -; SI-NEXT: v_or_b32_e32 v18, 
s4, v12 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_lshl_b32 s40, s13, 8 -; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: v_or_b32_e32 v10, v10, v1 +; SI-NEXT: v_or_b32_e32 v14, v14, v7 +; SI-NEXT: v_or_b32_e32 v26, v5, v17 +; SI-NEXT: v_and_b32_e32 v17, 0xff, v21 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s12, s10, s9 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_or_b32 s4, s4, s40 -; SI-NEXT: s_or_b32 s8, s8, s41 -; SI-NEXT: v_or_b32_e32 v22, v17, v9 +; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_or_b32_e32 v17, v17, v25 +; SI-NEXT: s_or_b32 s15, s5, s12 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_alignbit_b32 v1, s11, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, s8, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v22, 16 -; SI-NEXT: v_or_b32_e32 v12, s4, v22 -; SI-NEXT: s_lshr_b32 s40, s5, 16 -; SI-NEXT: s_lshr_b32 s41, s41, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_or_b32_e32 v9, v3, v9 +; SI-NEXT: v_or_b32_e32 v10, v10, v15 +; SI-NEXT: v_or_b32_e32 v14, v14, v26 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: s_lshr_b64 s[10:11], s[14:15], 16 +; SI-NEXT: s_or_b32 s4, s4, s14 +; SI-NEXT: v_or_b32_e32 v19, v11, v9 +; SI-NEXT: v_mov_b32_e32 v20, v10 +; SI-NEXT: v_lshr_b64 v[9:10], v[9:10], 16 +; SI-NEXT: v_or_b32_e32 v17, v17, v13 +; SI-NEXT: v_mov_b32_e32 v18, v14 +; SI-NEXT: v_lshr_b64 v[13:14], v[13:14], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s12, 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v26 +; SI-NEXT: s_mov_b32 s7, s13 +; SI-NEXT: s_mov_b32 s5, s15 ; SI-NEXT: s_cbranch_execnz .LBB99_3 ; SI-NEXT: .LBB99_2: ; %cmp.true -; SI-NEXT: s_add_i32 s14, s14, 3 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_lshl_b32 s5, s13, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: 
v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v17, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v19 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v16 -; SI-NEXT: v_or_b32_e32 v1, v21, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s10, s10, 3 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s5, s9, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v4 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 ; SI-NEXT: s_and_b32 s4, s24, 0xff ; SI-NEXT: s_lshl_b32 s5, s25, 8 ; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s26, 0xff +; SI-NEXT: s_and_b32 s6, s26, 0xff ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s27, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s12, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s7, 24 ; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_add_i32 s28, s28, 3 +; 
SI-NEXT: v_add_i32_e32 v9, vcc, 3, v21 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s6, s29, 8 +; SI-NEXT: s_add_i32 s42, s42, 3 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v12 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s7, s42, 0xff +; SI-NEXT: v_or_b32_e32 v9, v25, v9 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_or_b32_e32 v2, v23, v2 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s43, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x300, v9 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x300, v2 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s8, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_lshl_b32 s5, s17, 8 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: v_or_b32_e32 v10, v24, v10 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s16, 0xff +; SI-NEXT: s_lshl_b32 s7, s17, 8 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s18, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v6 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s8, s18, 0xff +; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v9 +; SI-NEXT: v_add_i32_e32 
v9, vcc, 3, v22 +; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v2 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v6 +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_lshl_b32 s7, s19, 24 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v16 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v8 -; SI-NEXT: s_add_i32 s15, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s8, s21, 8 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s22, 0xff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s9, s22, 0xff +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x300, v7 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_addk_i32 s7, 0x300 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v9 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: s_add_i32 s11, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s15 -; SI-NEXT: v_alignbit_b32 v1, s11, v0, 16 -; SI-NEXT: v_mov_b32_e32 
v0, s12 -; SI-NEXT: v_alignbit_b32 v5, s8, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v18, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: s_lshr_b32 s40, s11, 16 -; SI-NEXT: s_lshr_b32 s41, s8, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v5 +; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_add_i32 s6, s6, 0x3000000 +; SI-NEXT: s_add_i32 s7, s7, 0x3000000 +; SI-NEXT: s_lshr_b64 s[8:9], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[17:18], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s5, 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v18 ; SI-NEXT: .LBB99_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s15 -; SI-NEXT: v_mov_b32_e32 v2, s11 -; SI-NEXT: v_mov_b32_e32 v3, s40 -; SI-NEXT: v_mov_b32_e32 v4, s12 -; SI-NEXT: v_mov_b32_e32 v6, s8 -; SI-NEXT: v_mov_b32_e32 v7, s41 -; SI-NEXT: v_mov_b32_e32 v8, v18 +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v6, s5 +; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v8, v19 +; SI-NEXT: v_mov_b32_e32 v10, v20 +; SI-NEXT: v_mov_b32_e32 v12, v17 +; SI-NEXT: v_mov_b32_e32 v14, v18 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB99_4: -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr6 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr18 
+; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 ; SI-NEXT: s_branch .LBB99_2 ; @@ -35384,116 +35434,121 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32 ; SI-LABEL: bitcast_v16f16_to_v32i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v34, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v33, s16 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v32, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v32, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v37, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v36, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v53, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v52, s20 ; SI-NEXT: v_cvt_f16_f32_e32 v14, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v35, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v48, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v39, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v39, s22 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_cvt_f16_f32_e32 v40, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v55, s24 ; SI-NEXT: v_cvt_f16_f32_e32 v22, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v38, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v51, s29 -; SI-NEXT: v_cvt_f16_f32_e32 v50, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v54, s26 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v42, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v41, s28 ; SI-NEXT: 
v_cvt_f16_f32_e32 v30, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB105_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37 -; SI-NEXT: v_or_b32_e32 v8, v36, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v14 -; SI-NEXT: v_or_b32_e32 v12, v35, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v48 -; SI-NEXT: v_or_b32_e32 v16, v39, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v22 -; SI-NEXT: v_or_b32_e32 v20, v38, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v51 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v32 +; SI-NEXT: v_or_b32_e32 v48, v16, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v24, v50, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v30 -; SI-NEXT: v_or_b32_e32 v0, v33, v0 -; SI-NEXT: v_or_b32_e32 v4, v32, v1 -; SI-NEXT: v_or_b32_e32 v28, v49, v5 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v19, v20, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v16, 16 -; SI-NEXT: v_alignbit_b32 v17, v20, v16, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v24, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v24, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 +; SI-NEXT: v_or_b32_e32 v49, v8, v1 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 +; SI-NEXT: v_or_b32_e32 v35, v52, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v14 +; SI-NEXT: v_or_b32_e32 v36, v39, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 +; SI-NEXT: v_or_b32_e32 v37, v55, v2 +; SI-NEXT: 
v_lshlrev_b32_e32 v2, 16, v22 +; SI-NEXT: v_or_b32_e32 v38, v54, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 +; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[35:36], 24 +; SI-NEXT: v_or_b32_e32 v33, v41, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: v_lshr_b64 v[4:5], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[12:13], v[35:36], 16 +; SI-NEXT: v_or_b32_e32 v34, v0, v2 +; SI-NEXT: v_lshr_b64 v[24:25], v[37:38], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[35:36], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v38 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v34 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v15, v14, 8, 8 ; SI-NEXT: v_bfe_u32 v23, v22, 8, 8 ; SI-NEXT: v_bfe_u32 v31, v30, 8, 8 +; SI-NEXT: v_lshr_b64 v[19:20], v[37:38], 24 +; SI-NEXT: v_lshr_b64 v[17:18], v[37:38], 8 +; SI-NEXT: v_lshr_b64 v[27:28], v[33:34], 24 +; SI-NEXT: v_lshr_b64 v[50:51], v[33:34], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[33:34], 8 ; SI-NEXT: s_cbranch_execnz .LBB105_3 ; SI-NEXT: .LBB105_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v0, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v39 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v30 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v2 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v49 -; SI-NEXT: v_or_b32_e32 v24, v1, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 
16, v1 +; SI-NEXT: v_or_b32_e32 v33, v2, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v55 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v30 -; SI-NEXT: v_or_b32_e32 v28, v2, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v30 +; SI-NEXT: v_or_b32_e32 v34, v0, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v22 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v16, v3, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v37 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2 +; SI-NEXT: v_or_b32_e32 v37, v3, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v53 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v22, v0 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v1 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v52 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v22 -; SI-NEXT: v_or_b32_e32 v20, v0, v2 +; SI-NEXT: v_or_b32_e32 v38, v0, v2 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v14 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v35 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v39 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v1 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_or_b32_e32 v8, v2, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v34 +; SI-NEXT: v_or_b32_e32 v35, v2, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v32 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v14 -; SI-NEXT: v_or_b32_e32 v12, v1, 
v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v33 +; SI-NEXT: v_or_b32_e32 v36, v1, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v32 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v8 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 @@ -35503,60 +35558,72 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v4, v2, v1 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v19, v20, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v16, 16 -; SI-NEXT: v_alignbit_b32 v17, v20, v16, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v24, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v24, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 +; SI-NEXT: v_or_b32_e32 v48, v1, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6 +; SI-NEXT: v_or_b32_e32 v49, v2, v0 +; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[35:36], 24 +; SI-NEXT: v_lshr_b64 v[24:25], v[37:38], 16 +; SI-NEXT: v_lshr_b64 v[4:5], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8 +; SI-NEXT: v_lshr_b64 v[12:13], v[35:36], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[35:36], 8 +; SI-NEXT: v_lshr_b64 v[19:20], v[37:38], 24 +; SI-NEXT: v_lshr_b64 v[17:18], v[37:38], 8 +; SI-NEXT: v_lshr_b64 v[27:28], v[33:34], 24 +; SI-NEXT: v_lshr_b64 v[50:51], v[33:34], 16 +; 
SI-NEXT: v_lshr_b64 v[25:26], v[33:34], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v38 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v34 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v15, v14, 8, 8 ; SI-NEXT: v_bfe_u32 v23, v22, 8, 8 ; SI-NEXT: v_bfe_u32 v31, v30, 8, 8 ; SI-NEXT: .LBB105_3: ; %end +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v0, v48 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v49 +; SI-NEXT: v_mov_b32_e32 v8, v35 +; SI-NEXT: v_mov_b32_e32 v10, v12 +; SI-NEXT: v_mov_b32_e32 v12, v36 +; SI-NEXT: v_mov_b32_e32 v16, v37 +; SI-NEXT: v_mov_b32_e32 v18, v24 +; SI-NEXT: v_mov_b32_e32 v20, v38 +; SI-NEXT: v_mov_b32_e32 v24, v33 +; SI-NEXT: v_mov_b32_e32 v26, v50 +; SI-NEXT: v_mov_b32_e32 v28, v34 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB105_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr20 ; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: 
$vgpr29 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: s_branch .LBB105_2 ; ; VI-LABEL: bitcast_v16f16_to_v32i8_scalar: @@ -38793,166 +38860,186 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mul_f32_e64 v34, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v35, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v32, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v33, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v38, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v39, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v36, 1.0, s23 -; SI-NEXT: v_mul_f32_e64 v37, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v50, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v51, 1.0, s24 -; SI-NEXT: v_mul_f32_e64 v48, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v49, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v54, 1.0, s29 -; SI-NEXT: v_mul_f32_e64 v55, 1.0, s28 -; SI-NEXT: v_mul_f32_e32 v52, 1.0, v1 -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v0 +; SI-NEXT: v_mul_f32_e64 v24, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v32, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v16, 1.0, s18 
+; SI-NEXT: v_mul_f32_e64 v51, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v52, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v39, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v50, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v55, 1.0, s25 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mul_f32_e64 v40, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v53, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v54, 1.0, s26 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mul_f32_e64 v42, 1.0, s29 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mul_f32_e64 v43, 1.0, s28 +; SI-NEXT: v_mul_f32_e32 v41, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v0, 1.0, v0 ; SI-NEXT: s_cbranch_scc0 .LBB109_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v38 -; SI-NEXT: v_alignbit_b32 v8, v5, v39, 16 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v50 -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v34 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v32 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v36 -; SI-NEXT: v_alignbit_b32 v16, v5, v51, 16 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v48 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v54 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v52 -; SI-NEXT: v_alignbit_b32 v0, v0, v35, 16 -; SI-NEXT: v_alignbit_b32 v4, v6, v33, 16 -; SI-NEXT: v_alignbit_b32 v12, v14, v37, 16 -; SI-NEXT: v_alignbit_b32 v20, v22, v49, 16 -; SI-NEXT: v_alignbit_b32 v24, v5, v55, 16 -; SI-NEXT: v_alignbit_b32 v28, v30, v53, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v19, v20, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v16, 16 -; SI-NEXT: v_alignbit_b32 v17, v20, v16, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v24, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v24, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v32 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v36 -; SI-NEXT: 
v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v48 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v52 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v8 +; SI-NEXT: v_alignbit_b32 v48, v1, v32, 16 +; SI-NEXT: v_alignbit_b32 v49, v6, v16, 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v51 +; SI-NEXT: v_alignbit_b32 v37, v2, v52, 16 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v39 +; SI-NEXT: v_alignbit_b32 v35, v2, v40, 16 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v42 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v41 +; SI-NEXT: v_alignbit_b32 v38, v14, v50, 16 +; SI-NEXT: v_alignbit_b32 v36, v22, v54, 16 +; SI-NEXT: v_alignbit_b32 v33, v2, v43, 16 +; SI-NEXT: v_alignbit_b32 v34, v30, v0, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[11:12], v[37:38], 24 +; SI-NEXT: v_lshr_b64 v[19:20], v[35:36], 24 +; SI-NEXT: v_lshr_b64 v[27:28], v[33:34], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[12:13], v[37:38], 16 +; SI-NEXT: v_lshr_b64 v[20:21], v[35:36], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[33:34], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[37:38], 8 +; SI-NEXT: v_lshr_b64 v[17:18], v[35:36], 8 +; SI-NEXT: v_lshr_b64 v[25:26], v[33:34], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49 +; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v39 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v38 +; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v53 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v41 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v34 ; SI-NEXT: s_cbranch_execnz .LBB109_3 ; SI-NEXT: .LBB109_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v54 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v55 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v42 +; SI-NEXT: 
v_and_b32_e32 v1, 0xffff0000, v43 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v24, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v52 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v53 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_alignbit_b32 v33, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v41 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v31 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v50 -; SI-NEXT: v_alignbit_b32 v28, v30, v0, 16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v51 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v55 +; SI-NEXT: v_alignbit_b32 v34, v30, v0, 16 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v40 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v16, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v48 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v49 +; SI-NEXT: v_alignbit_b32 v35, v1, v0, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v53 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v54 ; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v23 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v38 -; SI-NEXT: v_alignbit_b32 v20, v22, v0, 16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v39 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v51 +; SI-NEXT: v_alignbit_b32 v36, v22, v0, 16 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v52 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_alignbit_b32 v8, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v36 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v37 +; SI-NEXT: 
v_alignbit_b32 v37, v1, v0, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v50 ; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v15 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v34 -; SI-NEXT: v_alignbit_b32 v12, v14, v0, 16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v35 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v24 +; SI-NEXT: v_alignbit_b32 v38, v14, v0, 16 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v32 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v32 -; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v33 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_alignbit_b32 v48, v1, v0, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v7 -; SI-NEXT: v_alignbit_b32 v4, v6, v1, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, v12, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, v12, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, v12, v8, 8 -; SI-NEXT: v_alignbit_b32 v19, v20, v16, 24 -; SI-NEXT: v_alignbit_b32 v18, v20, v16, 16 -; SI-NEXT: v_alignbit_b32 v17, v20, v16, 8 -; SI-NEXT: v_alignbit_b32 v27, v28, v24, 24 -; SI-NEXT: v_alignbit_b32 v26, v28, v24, 16 -; SI-NEXT: v_alignbit_b32 v25, v28, v24, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v12 -; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v20 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v28 +; SI-NEXT: v_alignbit_b32 v49, v6, v0, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[48:49], 24 +; SI-NEXT: v_lshr_b64 v[11:12], 
v[37:38], 24 +; SI-NEXT: v_lshr_b64 v[19:20], v[35:36], 24 +; SI-NEXT: v_lshr_b64 v[27:28], v[33:34], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 8 +; SI-NEXT: v_lshr_b64 v[12:13], v[37:38], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[37:38], 8 +; SI-NEXT: v_lshr_b64 v[20:21], v[35:36], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[35:36], 8 +; SI-NEXT: v_lshr_b64 v[28:29], v[33:34], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[33:34], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v49 +; SI-NEXT: v_lshrrev_b32_e32 v13, 8, v38 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v36 +; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v34 ; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v7 ; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v15 ; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v23 ; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v31 ; SI-NEXT: .LBB109_3: ; %end +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v0, v48 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v49 +; SI-NEXT: v_mov_b32_e32 v8, v37 +; SI-NEXT: v_mov_b32_e32 v10, v12 +; SI-NEXT: v_mov_b32_e32 v12, v38 +; SI-NEXT: v_mov_b32_e32 v16, v35 +; SI-NEXT: v_mov_b32_e32 v18, v20 +; SI-NEXT: v_mov_b32_e32 v20, v36 +; SI-NEXT: v_mov_b32_e32 v24, v33 +; SI-NEXT: v_mov_b32_e32 v26, v28 +; SI-NEXT: v_mov_b32_e32 v28, v34 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB109_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 
-; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr20 ; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr22 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: s_branch .LBB109_2 ; ; VI-LABEL: bitcast_v16bf16_to_v32i8_scalar: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll index 6cf53d187fcab..57de868ad37b3 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll @@ -647,70 +647,65 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s25, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: 
v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_lshr_b32 s25, s23, 16 +; SI-NEXT: s_lshr_b32 s26, s21, 16 +; SI-NEXT: s_lshr_b32 s27, s19, 16 +; SI-NEXT: s_lshr_b32 s28, s17, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 ; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s4, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s23, 16 -; SI-NEXT: s_lshr_b32 s7, s21, 16 -; SI-NEXT: s_lshr_b32 s8, s19, 16 -; SI-NEXT: s_lshr_b32 s9, s17, 16 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_lshr_b32 s25, s23, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 
+; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s26, s21, 16 +; SI-NEXT: s_lshr_b32 s27, s19, 16 +; SI-NEXT: s_lshr_b32 s28, s17, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s12 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v3, s28 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s10 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s8 +; SI-NEXT: v_mov_b32_e32 v7, s27 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s6 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s7 +; SI-NEXT: v_mov_b32_e32 v11, s26 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s4 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s6 +; SI-NEXT: v_mov_b32_e32 v15, s25 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s8 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v9i32_to_v18i16_scalar: @@ -2361,66 +2356,75 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s25, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s4, v0, 16 -; SI-NEXT: 
v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s25, s17, 16 +; SI-NEXT: s_lshr_b32 s28, s23, 16 +; SI-NEXT: s_lshr_b32 s27, s21, 16 +; SI-NEXT: s_lshr_b32 s26, s19, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_4 ; SI-NEXT: .LBB13_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v16, s24, 1.0 -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_alignbit_b32 v17, s4, v16, 16 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v26, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v25, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v24, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v23, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v22, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v21, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v20, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v19, s22, 1.0 +; SI-NEXT: v_lshr_b64 v[13:14], 
v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[16:17], 16 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v26 +; SI-NEXT: s_branch .LBB13_5 ; SI-NEXT: .LBB13_3: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr28 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: s_branch .LBB13_2 ; SI-NEXT: .LBB13_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v25, s16 +; SI-NEXT: v_mov_b32_e32 v26, s17 +; SI-NEXT: v_mov_b32_e32 v23, s18 +; SI-NEXT: v_mov_b32_e32 v24, s19 +; SI-NEXT: v_mov_b32_e32 v21, s20 +; SI-NEXT: v_mov_b32_e32 v22, s21 +; SI-NEXT: v_mov_b32_e32 v19, s22 +; SI-NEXT: v_mov_b32_e32 v20, s23 ; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v3, s25 +; SI-NEXT: v_mov_b32_e32 v7, s26 +; SI-NEXT: v_mov_b32_e32 v11, s27 +; SI-NEXT: v_mov_b32_e32 v15, s28 +; SI-NEXT: v_mov_b32_e32 v17, s8 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; 
SI-NEXT: v_mov_b32_e32 v9, s6 +; SI-NEXT: v_mov_b32_e32 v13, s4 +; SI-NEXT: .LBB13_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v25 +; SI-NEXT: v_mov_b32_e32 v2, v26 +; SI-NEXT: v_mov_b32_e32 v4, v23 +; SI-NEXT: v_mov_b32_e32 v6, v24 +; SI-NEXT: v_mov_b32_e32 v8, v21 +; SI-NEXT: v_mov_b32_e32 v10, v22 +; SI-NEXT: v_mov_b32_e32 v12, v19 +; SI-NEXT: v_mov_b32_e32 v14, v20 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v9f32_to_v18i16_scalar: @@ -4659,112 +4663,114 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i ; SI-LABEL: bitcast_v18f16_to_v18i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v17, v3 -; SI-NEXT: v_mov_b32_e32 v16, v2 -; SI-NEXT: v_mov_b32_e32 v15, v1 -; SI-NEXT: v_mov_b32_e32 v14, v0 -; SI-NEXT: v_mov_b32_e32 v18, v4 +; SI-NEXT: v_mov_b32_e32 v5, v3 +; SI-NEXT: v_mov_b32_e32 v9, v2 +; SI-NEXT: v_mov_b32_e32 v10, v0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v18, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s25 +; SI-NEXT: v_cvt_f16_f32_e32 
v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB23_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB23_3 ; SI-NEXT: .LBB23_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v18 ; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; 
SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_or_b32_e32 v16, v16, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v15 +; SI-NEXT: v_or_b32_e32 v14, v14, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_or_b32_e32 v10, v10, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v14, v14, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v7 ; SI-NEXT: v_or_b32_e32 v6, v6, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v16, v16, v18 ; SI-NEXT: 
v_lshlrev_b32_e32 v18, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 ; SI-NEXT: v_or_b32_e32 v2, v2, v18 +; SI-NEXT: v_lshr_b64 v[18:19], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[19:20], v[13:14], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 ; SI-NEXT: .LBB23_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v23 +; SI-NEXT: v_mov_b32_e32 v5, v18 +; SI-NEXT: v_mov_b32_e32 v9, v21 +; SI-NEXT: v_mov_b32_e32 v13, v19 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB23_4: ; SI-NEXT: s_branch .LBB23_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll index 2abb2f3b9de52..3aaf25423a184 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll @@ -681,76 +681,71 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 
16 +; SI-NEXT: s_lshr_b32 s26, s25, 16 +; SI-NEXT: s_lshr_b32 s27, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s29, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s26, s25, 16 +; SI-NEXT: s_lshr_b32 s27, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: 
s_lshr_b32 s29, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s12 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v3, s40 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s10 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s9 +; SI-NEXT: v_mov_b32_e32 v7, s29 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s8 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v11, s28 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s6 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s7 +; SI-NEXT: v_mov_b32_e32 v15, s27 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s4 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s6 +; SI-NEXT: v_mov_b32_e32 v19, s26 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr28 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v10i32_to_v20i16_scalar: @@ -3371,241 +3366,239 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v3, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v12, 
s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s25, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s25, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s25, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s21, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 8 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: 
s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 +; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 ; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: v_mov_b32_e32 v3, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s25, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s25, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s25, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s21, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 8 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: 
s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s40, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s28, 24 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: 
s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s27, 8 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s26, 0xff -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s15, s15, 24 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s15, s5 -; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s5, s60, 8 +; SI-NEXT: s_and_b32 s7, s16, 0xff +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: s_and_b32 s7, s58, 0xff +; SI-NEXT: s_lshl_b32 s9, s56, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: s_and_b32 s5, s17, 0xff +; SI-NEXT: s_lshl_b32 s7, s94, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s93, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s92, 24 +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_lshl_b32 s5, s46, 8 +; SI-NEXT: s_and_b32 s7, s18, 0xff +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: s_and_b32 s7, s44, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s42, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 
offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s14, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s12, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s11, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s10, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s9, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 +; SI-NEXT: v_mov_b32_e32 v2, 
s5 +; SI-NEXT: s_and_b32 s5, s19, 0xff +; SI-NEXT: s_lshl_b32 s7, s91, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s90, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s89, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s40, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s26, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s21, 0xff +; SI-NEXT: s_lshl_b32 s7, s88, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s79, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s78, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s14, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s12, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; 
SI-NEXT: s_lshl_b32 s9, s10, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s23, 0xff +; SI-NEXT: s_lshl_b32 s7, s77, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s76, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s75, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s24, 0xff +; SI-NEXT: s_lshl_b32 s7, s8, 8 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s4, s4, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 ; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s8, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_lshl_b32 s5, s74, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: s_and_b32 s5, s73, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s6, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s72, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 ; 
SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 36, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr28 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: ; implicit-def: $sgpr26 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; 
implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v10i32_to_v40i8_scalar: @@ -7591,72 +7584,83 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a, ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB25_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s40, s25, 16 +; SI-NEXT: s_lshr_b32 s29, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s27, s19, 16 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB25_4 ; SI-NEXT: .LBB25_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s25, 1.0 -; SI-NEXT: v_add_f32_e64 
v16, s24, 1.0 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v29, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v28, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v27, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v26, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v25, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v24, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v23, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v22, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v21, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v20, s24, 1.0 +; SI-NEXT: v_lshr_b64 v[17:18], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[22:23], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[24:25], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[26:27], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[28:29], 16 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v29 +; SI-NEXT: s_branch .LBB25_5 ; SI-NEXT: .LBB25_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr29 +; SI-NEXT: ; implicit-def: 
$sgpr4 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB25_2 ; SI-NEXT: .LBB25_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 +; SI-NEXT: v_mov_b32_e32 v28, s16 +; SI-NEXT: v_mov_b32_e32 v29, s17 +; SI-NEXT: v_mov_b32_e32 v26, s18 +; SI-NEXT: v_mov_b32_e32 v27, s19 +; SI-NEXT: v_mov_b32_e32 v24, s20 +; SI-NEXT: v_mov_b32_e32 v25, s21 +; SI-NEXT: v_mov_b32_e32 v22, s22 +; SI-NEXT: v_mov_b32_e32 v23, s23 +; SI-NEXT: v_mov_b32_e32 v20, s24 +; SI-NEXT: v_mov_b32_e32 v21, s25 +; SI-NEXT: v_mov_b32_e32 v3, s26 +; SI-NEXT: v_mov_b32_e32 v7, s27 +; SI-NEXT: v_mov_b32_e32 v11, s28 +; SI-NEXT: v_mov_b32_e32 v15, s29 +; SI-NEXT: v_mov_b32_e32 v19, s40 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v9, s8 +; SI-NEXT: v_mov_b32_e32 v13, s6 +; SI-NEXT: v_mov_b32_e32 v17, s4 +; SI-NEXT: .LBB25_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v28 +; SI-NEXT: v_mov_b32_e32 v2, v29 +; SI-NEXT: v_mov_b32_e32 v4, v26 +; SI-NEXT: v_mov_b32_e32 v6, v27 +; SI-NEXT: v_mov_b32_e32 v8, v24 +; SI-NEXT: v_mov_b32_e32 v10, v25 +; SI-NEXT: v_mov_b32_e32 v12, v22 +; SI-NEXT: v_mov_b32_e32 v14, v23 +; SI-NEXT: v_mov_b32_e32 v16, v20 +; SI-NEXT: v_mov_b32_e32 v18, v21 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v10f32_to_v20i16_scalar: @@ -10305,256 +10309,261 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB33_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v3, s24 -; 
SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s25, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s25, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s25, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s21, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 8 -; SI-NEXT: s_lshr_b32 s28, s25, 24 -; SI-NEXT: s_lshr_b32 s29, s25, 16 -; SI-NEXT: s_lshr_b32 s40, s25, 8 -; SI-NEXT: s_lshr_b32 s15, s23, 24 -; SI-NEXT: s_lshr_b32 s26, s23, 16 -; SI-NEXT: s_lshr_b32 s27, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s9, s19, 24 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s91, s25, 24 +; SI-NEXT: s_lshr_b32 s93, s25, 16 +; SI-NEXT: s_lshr_b32 s94, s25, 8 +; SI-NEXT: s_lshr_b32 s88, s23, 24 +; SI-NEXT: s_lshr_b32 s90, s23, 16 +; SI-NEXT: s_lshr_b32 s92, s23, 8 +; SI-NEXT: s_lshr_b32 s77, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s89, s21, 8 +; SI-NEXT: s_lshr_b32 s74, s19, 24 +; SI-NEXT: s_lshr_b32 s76, s19, 16 +; SI-NEXT: s_lshr_b32 s78, s19, 8 +; SI-NEXT: s_lshr_b32 s72, s17, 24 +; SI-NEXT: s_lshr_b32 s73, s17, 16 +; SI-NEXT: s_lshr_b32 s75, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 8 +; SI-NEXT: 
s_lshr_b64 s[10:11], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB33_4 ; SI-NEXT: .LBB33_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v31, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v34, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v28, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v29, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v23, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v24, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v21, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s25, 1.0 -; SI-NEXT: v_add_f32_e64 v17, s24, 1.0 -; SI-NEXT: v_alignbit_b32 v1, v16, v17, 24 -; SI-NEXT: v_alignbit_b32 v2, v16, v17, 16 -; SI-NEXT: v_alignbit_b32 v3, v16, v17, 8 -; SI-NEXT: v_alignbit_b32 v4, v18, v21, 24 -; SI-NEXT: v_alignbit_b32 v5, v18, v21, 16 -; SI-NEXT: v_alignbit_b32 v6, v18, v21, 8 -; SI-NEXT: v_alignbit_b32 v7, v23, v24, 24 -; SI-NEXT: v_alignbit_b32 v8, v23, v24, 16 -; SI-NEXT: v_alignbit_b32 v9, v23, v24, 8 -; SI-NEXT: v_alignbit_b32 v10, v28, v29, 24 -; SI-NEXT: v_alignbit_b32 v11, v28, v29, 16 -; SI-NEXT: v_alignbit_b32 v12, v28, v29, 8 -; SI-NEXT: v_alignbit_b32 v13, v31, v34, 24 -; SI-NEXT: v_alignbit_b32 v14, v31, v34, 16 -; SI-NEXT: v_alignbit_b32 v15, v31, v34, 8 -; SI-NEXT: v_lshrrev_b32_e32 v19, 24, v16 -; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v22, 8, v16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 24, v18 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v18 -; SI-NEXT: v_lshrrev_b32_e32 v30, 24, v23 -; SI-NEXT: v_lshrrev_b32_e32 
v32, 16, v23 -; SI-NEXT: v_lshrrev_b32_e32 v33, 8, v23 -; SI-NEXT: v_lshrrev_b32_e32 v35, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v28 -; SI-NEXT: v_lshrrev_b32_e32 v37, 8, v28 -; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v31 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v31 -; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v31 +; SI-NEXT: v_add_f32_e64 v5, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v6, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v12, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v13, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v7, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v10, s18, 1.0 +; SI-NEXT: v_readfirstlane_b32 s16, v6 +; SI-NEXT: v_readfirstlane_b32 s17, v5 +; SI-NEXT: v_add_f32_e64 v3, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v4, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v1, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v2, s24, 1.0 +; SI-NEXT: s_lshr_b64 s[26:27], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 8 +; SI-NEXT: v_readfirstlane_b32 s16, v10 +; SI-NEXT: v_readfirstlane_b32 s17, v7 +; SI-NEXT: v_readfirstlane_b32 s8, v2 +; SI-NEXT: v_readfirstlane_b32 s9, v1 +; SI-NEXT: v_readfirstlane_b32 s14, v4 +; SI-NEXT: v_readfirstlane_b32 s15, v3 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[16:17], 8 +; SI-NEXT: v_readfirstlane_b32 s16, v13 +; SI-NEXT: v_readfirstlane_b32 s17, v12 +; SI-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 +; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v1 +; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v1 +; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v3 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; SI-NEXT: 
v_lshrrev_b32_e32 v16, 8, v3 +; SI-NEXT: v_lshrrev_b32_e32 v17, 24, v5 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v5 +; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v5 +; SI-NEXT: v_lshrrev_b32_e32 v20, 24, v7 +; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v22, 8, v7 +; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v12 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v12 ; SI-NEXT: s_branch .LBB33_5 ; SI-NEXT: .LBB33_3: -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr27 -; SI-NEXT: ; implicit-def: $sgpr26 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr26 +; 
SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB33_2 ; SI-NEXT: .LBB33_4: -; SI-NEXT: v_mov_b32_e32 v34, s16 -; SI-NEXT: v_mov_b32_e32 v31, s17 -; SI-NEXT: v_mov_b32_e32 v29, s18 -; SI-NEXT: v_mov_b32_e32 v28, s19 -; SI-NEXT: v_mov_b32_e32 v24, s20 -; SI-NEXT: v_mov_b32_e32 v23, s21 -; SI-NEXT: v_mov_b32_e32 v21, s22 -; SI-NEXT: v_mov_b32_e32 v18, s23 -; SI-NEXT: v_mov_b32_e32 v17, s24 -; SI-NEXT: v_mov_b32_e32 v16, s25 -; SI-NEXT: v_mov_b32_e32 v48, s8 -; SI-NEXT: v_mov_b32_e32 v39, s7 -; SI-NEXT: v_mov_b32_e32 v38, s6 -; SI-NEXT: v_mov_b32_e32 v37, s11 -; SI-NEXT: v_mov_b32_e32 v36, s10 -; SI-NEXT: v_mov_b32_e32 v35, s9 -; SI-NEXT: v_mov_b32_e32 v33, s14 -; SI-NEXT: v_mov_b32_e32 v32, s13 -; SI-NEXT: v_mov_b32_e32 v30, s12 -; SI-NEXT: v_mov_b32_e32 v27, s27 -; SI-NEXT: v_mov_b32_e32 v26, s26 -; SI-NEXT: v_mov_b32_e32 v25, s15 -; SI-NEXT: v_mov_b32_e32 v22, s40 -; SI-NEXT: v_mov_b32_e32 v20, s29 -; SI-NEXT: v_mov_b32_e32 v19, s28 +; SI-NEXT: v_mov_b32_e32 v13, s16 +; SI-NEXT: v_mov_b32_e32 v12, s17 +; SI-NEXT: v_mov_b32_e32 v10, s18 +; SI-NEXT: v_mov_b32_e32 v7, s19 +; SI-NEXT: v_mov_b32_e32 v6, s20 +; SI-NEXT: v_mov_b32_e32 v5, s21 +; SI-NEXT: v_mov_b32_e32 v4, s22 +; SI-NEXT: v_mov_b32_e32 v3, s23 +; SI-NEXT: v_mov_b32_e32 v2, s24 +; SI-NEXT: v_mov_b32_e32 v1, s25 +; SI-NEXT: v_mov_b32_e32 v25, s75 +; SI-NEXT: v_mov_b32_e32 v24, s73 +; SI-NEXT: v_mov_b32_e32 v23, s72 +; SI-NEXT: v_mov_b32_e32 v22, s78 +; SI-NEXT: v_mov_b32_e32 v21, s76 +; SI-NEXT: v_mov_b32_e32 
v20, s74 +; SI-NEXT: v_mov_b32_e32 v19, s89 +; SI-NEXT: v_mov_b32_e32 v18, s79 +; SI-NEXT: v_mov_b32_e32 v17, s77 +; SI-NEXT: v_mov_b32_e32 v16, s92 +; SI-NEXT: v_mov_b32_e32 v15, s90 +; SI-NEXT: v_mov_b32_e32 v14, s88 +; SI-NEXT: v_mov_b32_e32 v11, s94 +; SI-NEXT: v_mov_b32_e32 v9, s93 +; SI-NEXT: v_mov_b32_e32 v8, s91 ; SI-NEXT: .LBB33_5: ; %end -; SI-NEXT: v_and_b32_e32 v34, 0xff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: v_or_b32_e32 v15, v34, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v48 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v39 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v38 +; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: s_lshl_b32 s5, s60, 8 +; SI-NEXT: v_or_b32_e32 v13, s5, v13 +; SI-NEXT: s_and_b32 s5, s58, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s56, 24 ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: v_or_b32_e32 v13, s5, v13 +; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_or_b32_e32 v12, v13, v12 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v25 +; SI-NEXT: 
v_or_b32_e32 v12, v12, v13 +; SI-NEXT: v_and_b32_e32 v13, 0xff, v24 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: s_lshl_b32 s5, s46, 8 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 +; SI-NEXT: v_or_b32_e32 v10, s5, v10 +; SI-NEXT: s_and_b32 s5, s44, 0xff ; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v37 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v35 +; SI-NEXT: v_or_b32_e32 v13, v23, v13 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s42, 24 +; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: v_add_i32_e32 v13, vcc, 4, v0 ; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v11, v12, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v10, s5, v10 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v12, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: 
v_and_b32_e32 v7, 0xff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v33 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v30 +; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v22 +; SI-NEXT: v_or_b32_e32 v7, v7, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v21 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: s_lshl_b32 s5, s40, 8 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v20 +; SI-NEXT: v_or_b32_e32 v6, s5, v6 +; SI-NEXT: s_and_b32 s5, s28, 0xff ; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v8, v9, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v10, v12, v10 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s26, 24 +; SI-NEXT: v_or_b32_e32 v7, v7, v10 +; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v6, s5, v6 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 +; SI-NEXT: v_add_i32_e32 v7, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v27 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v25 +; SI-NEXT: 
v_lshlrev_b32_e32 v6, 8, v19 +; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v18 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_lshl_b32 s5, s14, 8 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 +; SI-NEXT: v_or_b32_e32 v4, s5, v4 +; SI-NEXT: s_and_b32 s5, s12, 0xff +; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s10, 24 +; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_add_i32_e32 v6, vcc, 20, v0 ; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 28, v0 +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v4, s5, v4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v16 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v15 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: s_lshl_b32 s5, s8, 8 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v14 +; SI-NEXT: v_or_b32_e32 v2, s5, v2 +; SI-NEXT: s_and_b32 s5, s6, 0xff ; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s4, s4, 24 +; SI-NEXT: v_or_b32_e32 v3, v3, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 +; SI-NEXT: v_and_b32_e32 v2, 
0xffff, v2 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v2, s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v22 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v11 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v20 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v9 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v19 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v8 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 @@ -15184,124 +15193,127 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i ; SI-LABEL: bitcast_v20f16_to_v20i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v19, v5 -; SI-NEXT: v_mov_b32_e32 v18, v4 -; SI-NEXT: v_mov_b32_e32 v17, v3 -; SI-NEXT: v_mov_b32_e32 v16, v2 -; SI-NEXT: v_mov_b32_e32 v15, v1 +; SI-NEXT: v_mov_b32_e32 v9, v4 +; SI-NEXT: v_mov_b32_e32 v10, v3 +; SI-NEXT: v_mov_b32_e32 v13, v2 ; SI-NEXT: v_mov_b32_e32 v14, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 
v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v25, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB47_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB47_3 ; SI-NEXT: .LBB47_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; 
SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v23 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_or_b32_e32 v6, v6, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v24 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v3 +; SI-NEXT: v_or_b32_e32 v10, v10, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v25 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 +; SI-NEXT: v_or_b32_e32 v2, v2, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v21 +; SI-NEXT: v_or_b32_e32 v14, v14, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; 
SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_or_b32_e32 v18, v18, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_or_b32_e32 v14, v14, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_or_b32_e32 v10, v10, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v20 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v19 +; SI-NEXT: v_lshr_b64 v[24:25], v[9:10], 16 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v18, v18, v20 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_lshr_b64 v[25:26], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[17:18], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; 
SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 ; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 ; SI-NEXT: .LBB47_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v23 +; SI-NEXT: v_mov_b32_e32 v5, v21 +; SI-NEXT: v_mov_b32_e32 v9, v24 +; SI-NEXT: v_mov_b32_e32 v13, v25 +; SI-NEXT: v_mov_b32_e32 v17, v26 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB47_4: ; SI-NEXT: s_branch .LBB47_2 @@ -16674,330 +16686,353 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32 ; SI-LABEL: bitcast_v20i16_to_v40i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v8, s30, 0 +; SI-NEXT: v_writelane_b32 v8, s31, 1 +; SI-NEXT: v_writelane_b32 v8, s34, 2 +; SI-NEXT: v_writelane_b32 v8, s35, 3 +; SI-NEXT: v_writelane_b32 v8, s36, 4 +; SI-NEXT: v_writelane_b32 v8, s37, 5 +; SI-NEXT: v_writelane_b32 v8, s38, 6 +; SI-NEXT: v_writelane_b32 v8, s39, 7 +; SI-NEXT: v_writelane_b32 v8, s48, 8 +; SI-NEXT: v_writelane_b32 v8, s49, 9 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 -; SI-NEXT: v_readfirstlane_b32 s72, v6 -; SI-NEXT: v_readfirstlane_b32 s73, v5 -; SI-NEXT: v_readfirstlane_b32 s62, v2 -; SI-NEXT: v_readfirstlane_b32 s63, v1 +; SI-NEXT: v_writelane_b32 v8, s50, 10 +; SI-NEXT: v_readfirstlane_b32 s39, v6 +; SI-NEXT: v_readfirstlane_b32 s48, v5 +; SI-NEXT: v_readfirstlane_b32 s49, v4 +; SI-NEXT: v_readfirstlane_b32 s50, v3 +; SI-NEXT: v_readfirstlane_b32 s35, v2 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v4 +; SI-NEXT: v_readfirstlane_b32 s38, v1 ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; 
SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s13, s4, s5 +; SI-NEXT: s_or_b32 s12, s4, s5 ; SI-NEXT: s_and_b32 s4, s18, 0xffff ; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s14, s4, s5 +; SI-NEXT: s_or_b32 s13, s4, s5 ; SI-NEXT: s_and_b32 s4, s20, 0xffff ; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s11, s4, s5 +; SI-NEXT: s_or_b32 s10, s4, s5 ; SI-NEXT: s_and_b32 s4, s22, 0xffff ; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: s_or_b32 s12, s4, s5 +; SI-NEXT: s_or_b32 s11, s4, s5 ; SI-NEXT: s_and_b32 s4, s24, 0xffff ; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: v_mov_b32_e32 v1, s13 -; SI-NEXT: s_or_b32 s9, s4, s5 +; SI-NEXT: s_or_b32 s8, s4, s5 ; SI-NEXT: s_and_b32 s4, s26, 0xffff ; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: v_alignbit_b32 v7, s14, v1, 24 -; SI-NEXT: v_alignbit_b32 v12, s14, v1, 16 -; SI-NEXT: v_alignbit_b32 v16, s14, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s11 -; SI-NEXT: s_or_b32 s10, s4, s5 +; SI-NEXT: s_or_b32 s9, s4, s5 ; SI-NEXT: s_and_b32 s4, s28, 0xffff ; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: v_alignbit_b32 v8, s12, v1, 24 -; SI-NEXT: v_alignbit_b32 v13, s12, v1, 16 -; SI-NEXT: v_alignbit_b32 v17, s12, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s9 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s63, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: v_alignbit_b32 v6, s10, v1, 24 -; SI-NEXT: v_alignbit_b32 v11, s10, v1, 16 -; SI-NEXT: v_alignbit_b32 v15, s10, v1, 8 -; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_alignbit_b32 v5, s8, v1, 24 -; SI-NEXT: v_alignbit_b32 v9, s8, v1, 16 -; SI-NEXT: v_alignbit_b32 v14, s8, v1, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: s_and_b32 s4, s73, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: v_or_b32_e32 v1, v1, v18 ; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: v_alignbit_b32 v2, s6, v1, 24 -; SI-NEXT: v_alignbit_b32 v4, s6, v1, 16 -; SI-NEXT: 
v_alignbit_b32 v10, s6, v1, 8 -; SI-NEXT: s_lshr_b32 s59, s14, 8 -; SI-NEXT: s_lshr_b32 s56, s12, 8 -; SI-NEXT: s_lshr_b32 s45, s10, 8 -; SI-NEXT: s_lshr_b32 s42, s8, 8 -; SI-NEXT: s_lshr_b32 s15, s6, 8 -; SI-NEXT: s_and_b32 s60, s19, 0xffff -; SI-NEXT: s_and_b32 s57, s23, 0xffff -; SI-NEXT: s_and_b32 s46, s27, 0xffff -; SI-NEXT: s_and_b32 s43, s62, 0xffff -; SI-NEXT: s_and_b32 s40, s72, 0xffff -; SI-NEXT: s_bfe_u32 s61, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s58, s23, 0x80008 -; SI-NEXT: s_bfe_u32 s47, s27, 0x80008 -; SI-NEXT: s_bfe_u32 s44, s62, 0x80008 -; SI-NEXT: s_bfe_u32 s41, s72, 0x80008 +; SI-NEXT: s_and_b32 s4, s38, 0xffff +; SI-NEXT: s_lshl_b32 s5, s35, 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[12:13], 24 +; SI-NEXT: s_or_b32 s7, s4, s5 +; SI-NEXT: s_and_b32 s4, s50, 0xffff +; SI-NEXT: s_lshl_b32 s5, s49, 16 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s48, 0xffff +; SI-NEXT: s_lshl_b32 s15, s39, 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_or_b32 s5, s5, s15 +; SI-NEXT: s_lshr_b32 s34, s13, 8 +; SI-NEXT: s_lshr_b32 s95, s11, 8 +; SI-NEXT: s_lshr_b32 s59, s9, 8 +; SI-NEXT: s_lshr_b32 s45, s7, 8 +; SI-NEXT: s_lshr_b32 s15, s5, 8 +; SI-NEXT: s_and_b32 s36, s19, 0xffff +; SI-NEXT: s_and_b32 s30, s23, 0xffff +; SI-NEXT: s_and_b32 s61, s27, 0xffff +; SI-NEXT: s_and_b32 s47, s35, 0xffff +; SI-NEXT: s_and_b32 s41, s39, 0xffff +; SI-NEXT: s_bfe_u32 s37, s19, 0x80008 +; SI-NEXT: s_bfe_u32 s31, s23, 0x80008 +; SI-NEXT: s_bfe_u32 s94, s27, 0x80008 +; SI-NEXT: s_bfe_u32 s57, s35, 0x80008 +; SI-NEXT: s_bfe_u32 s43, s39, 0x80008 +; SI-NEXT: s_lshr_b64 s[88:89], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[90:91], s[6:7], 16 +; SI-NEXT: s_lshr_b64 
s[74:75], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[4:5], 8 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: s_add_i32 s73, s73, 3 -; SI-NEXT: s_and_b32 s4, s73, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 +; SI-NEXT: s_add_i32 s50, s50, 3 +; SI-NEXT: s_and_b32 s4, s50, 0xffff +; SI-NEXT: s_lshl_b32 s5, s49, 16 +; SI-NEXT: s_add_i32 s48, s48, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s48, 0xffff +; SI-NEXT: s_lshl_b32 s6, s39, 16 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s63, s63, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s63, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s28, 0xffff +; SI-NEXT: s_lshl_b32 s7, s29, 16 +; SI-NEXT: s_add_i32 s38, s38, 3 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s38, 0xffff +; SI-NEXT: s_lshl_b32 s8, s35, 16 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s8, s24, 0xffff +; SI-NEXT: s_lshl_b32 s9, s25, 16 ; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: s_and_b32 s9, s26, 0xffff +; SI-NEXT: s_lshl_b32 s10, s27, 16 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s10, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: s_and_b32 s10, s20, 0xffff +; SI-NEXT: 
s_lshl_b32 s11, s21, 16 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s11, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: s_and_b32 s11, s22, 0xffff +; SI-NEXT: s_lshl_b32 s12, s23, 16 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s12, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: s_and_b32 s12, s16, 0xffff +; SI-NEXT: s_lshl_b32 s13, s17, 16 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s13, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v3 -; SI-NEXT: s_add_i32 s14, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v2, s13 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_alignbit_b32 v7, s14, v2, 24 -; SI-NEXT: v_alignbit_b32 v12, s14, v2, 16 -; SI-NEXT: v_alignbit_b32 v16, s14, v2, 8 -; SI-NEXT: v_mov_b32_e32 v2, s11 -; SI-NEXT: v_or_b32_e32 v1, v18, v1 -; SI-NEXT: v_alignbit_b32 v8, s12, v2, 24 -; SI-NEXT: v_alignbit_b32 v13, s12, v2, 16 -; SI-NEXT: v_alignbit_b32 v17, s12, v2, 8 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x30000, v1 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_alignbit_b32 v6, s10, v2, 24 -; SI-NEXT: v_alignbit_b32 v11, s10, v2, 16 -; SI-NEXT: v_alignbit_b32 v15, s10, v2, 8 -; SI-NEXT: v_mov_b32_e32 v2, s7 -; SI-NEXT: v_alignbit_b32 v5, s8, v2, 24 -; SI-NEXT: v_alignbit_b32 v9, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v14, s8, v2, 8 -; SI-NEXT: v_alignbit_b32 v2, v3, v1, 24 -; SI-NEXT: v_alignbit_b32 v4, v3, v1, 16 -; SI-NEXT: v_alignbit_b32 v10, v3, v1, 8 -; SI-NEXT: s_lshr_b32 s61, s14, 24 -; SI-NEXT: s_lshr_b32 s60, s14, 16 -; SI-NEXT: s_lshr_b32 s59, s14, 8 -; SI-NEXT: s_lshr_b32 s58, s12, 24 -; SI-NEXT: s_lshr_b32 s57, s12, 16 -; SI-NEXT: s_lshr_b32 s56, 
s12, 8 -; SI-NEXT: s_lshr_b32 s47, s10, 24 -; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: s_lshr_b32 s45, s10, 8 -; SI-NEXT: s_lshr_b32 s44, s8, 24 -; SI-NEXT: s_lshr_b32 s43, s8, 16 -; SI-NEXT: s_lshr_b32 s42, s8, 8 -; SI-NEXT: s_lshr_b32 s41, s6, 24 -; SI-NEXT: s_lshr_b32 s40, s6, 16 -; SI-NEXT: s_lshr_b32 s15, s6, 8 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: s_and_b32 s13, s18, 0xffff +; SI-NEXT: s_lshl_b32 s14, s19, 16 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: s_add_i32 s8, s8, 0x30000 +; SI-NEXT: s_add_i32 s9, s9, 0x30000 +; SI-NEXT: s_add_i32 s10, s10, 0x30000 +; SI-NEXT: s_add_i32 s11, s11, 0x30000 +; SI-NEXT: s_add_i32 s12, s12, 0x30000 +; SI-NEXT: s_add_i32 s13, s13, 0x30000 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_add_i32 s6, s6, 0x30000 +; SI-NEXT: s_add_i32 s7, s7, 0x30000 +; SI-NEXT: s_lshr_b64 s[14:15], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[40:41], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[78:79], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[90:91], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s37, s13, 24 +; SI-NEXT: s_lshr_b32 s36, s13, 16 +; SI-NEXT: s_lshr_b32 s34, s13, 8 +; SI-NEXT: s_lshr_b32 s31, s11, 24 +; SI-NEXT: s_lshr_b32 s30, s11, 16 +; SI-NEXT: s_lshr_b32 s95, s11, 8 +; SI-NEXT: s_lshr_b32 s94, s9, 24 +; SI-NEXT: s_lshr_b32 s61, s9, 16 +; SI-NEXT: s_lshr_b32 s59, s9, 8 +; SI-NEXT: s_lshr_b32 s57, s7, 24 +; SI-NEXT: s_lshr_b32 s47, s7, 16 +; SI-NEXT: s_lshr_b32 s45, s7, 8 +; SI-NEXT: s_lshr_b32 s43, s5, 24 +; SI-NEXT: 
s_lshr_b32 s41, s5, 16 +; SI-NEXT: s_lshr_b32 s15, s5, 8 ; SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v16 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: s_lshl_b32 s5, s59, 8 -; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s60, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s13, s61, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v7, v12 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s13, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s16, s56, 8 +; SI-NEXT: s_or_b32 s12, s12, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xff +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: s_lshl_b32 s13, s34, 8 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: s_and_b32 s13, s36, 0xff +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: s_lshl_b32 s14, s37, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s12, s60, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: s_and_b32 s12, s46, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s42, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: 
buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v17 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s12, 0xff -; SI-NEXT: s_lshl_b32 s5, s56, 8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v13 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s57, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s11, s58, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s11, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: s_lshl_b32 s11, s95, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s30, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s31, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v15 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xff -; SI-NEXT: s_lshl_b32 s5, s45, 8 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s10, s88, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: s_and_b32 s10, s58, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s44, 
24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v11 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s46, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s47, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s59, 8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_and_b32 s9, s61, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s94, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v14 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s42, 8 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_lshl_b32 s8, s74, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: s_and_b32 s8, s90, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s78, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword 
v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v9 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s43, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s7, s44, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: s_lshl_b32 s7, s45, 8 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_and_b32 s7, s47, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s57, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s76, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s72, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s62, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s7, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v10 -; SI-NEXT: s_and_b32 s4, s6, 0xff +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff ; SI-NEXT: s_lshl_b32 s5, s15, 8 -; SI-NEXT: v_or_b32_e32 v1, v1, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v4 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 
s5, s40, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; SI-NEXT: s_and_b32 s5, s41, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s41, 24 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: s_lshl_b32 s6, s43, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 36, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s50, v8, 10 +; SI-NEXT: v_readlane_b32 s49, v8, 9 +; SI-NEXT: v_readlane_b32 s48, v8, 8 +; SI-NEXT: v_readlane_b32 s39, v8, 7 +; SI-NEXT: v_readlane_b32 s38, v8, 6 +; SI-NEXT: v_readlane_b32 s37, v8, 5 +; SI-NEXT: v_readlane_b32 s36, v8, 4 +; SI-NEXT: v_readlane_b32 s35, v8, 3 +; SI-NEXT: v_readlane_b32 s34, v8, 2 +; SI-NEXT: v_readlane_b32 s31, v8, 1 +; SI-NEXT: v_readlane_b32 s30, v8, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr8 ; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr57 
-; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr60 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr95 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr31 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr58 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr94 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr47 +; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: ; implicit-def: $sgpr41 +; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr62 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v20i16_to_v40i8_scalar: @@ -19045,286 +19080,280 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32 
; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26 -; SI-NEXT: v_readfirstlane_b32 s14, v19 -; SI-NEXT: v_readfirstlane_b32 s40, v18 -; SI-NEXT: v_readfirstlane_b32 s12, v11 -; SI-NEXT: v_readfirstlane_b32 s13, v10 -; SI-NEXT: v_readfirstlane_b32 s8, v3 -; SI-NEXT: v_readfirstlane_b32 s9, v2 -; SI-NEXT: v_readfirstlane_b32 s7, v1 -; SI-NEXT: v_readfirstlane_b32 s6, v0 +; SI-NEXT: v_mov_b32_e32 v31, v18 +; SI-NEXT: v_mov_b32_e32 v32, v14 +; SI-NEXT: v_mov_b32_e32 v33, v10 +; SI-NEXT: v_readfirstlane_b32 s43, v1 +; SI-NEXT: v_readfirstlane_b32 s42, v0 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v34, 8, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v36, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v35, 24, v13 +; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v19 +; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v7 ; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v27, 8, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 -; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v13 -; SI-NEXT: v_lshlrev_b32_e32 v30, 8, v23 -; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v25 -; SI-NEXT: v_lshlrev_b32_e32 v31, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v25 ; SI-NEXT: s_cbranch_scc0 .LBB51_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s10, s23, 24 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s10, s5 -; SI-NEXT: s_or_b32 s11, s4, 
s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 -; SI-NEXT: s_lshl_b32 s10, s19, 24 -; SI-NEXT: s_or_b32 s4, s10, s4 -; SI-NEXT: s_and_b32 s10, s28, 0xff -; SI-NEXT: s_lshl_b32 s15, s29, 8 -; SI-NEXT: s_or_b32 s10, s10, s15 -; SI-NEXT: s_and_b32 s15, s6, 0xff -; SI-NEXT: s_lshl_b32 s15, s15, 16 -; SI-NEXT: s_lshl_b32 s41, s7, 24 -; SI-NEXT: s_or_b32 s43, s41, s15 -; SI-NEXT: s_and_b32 s15, s26, 0xff -; SI-NEXT: s_lshl_b32 s15, s15, 16 -; SI-NEXT: s_lshl_b32 s41, s27, 24 -; SI-NEXT: s_or_b32 s15, s41, s15 -; SI-NEXT: s_and_b32 s41, s16, 0xff -; SI-NEXT: s_lshl_b32 s42, s17, 8 -; SI-NEXT: s_or_b32 s41, s41, s42 -; SI-NEXT: s_and_b32 s41, s41, 0xffff -; SI-NEXT: v_mov_b32_e32 v1, s4 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v6 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v8 -; SI-NEXT: s_or_b32 s41, s41, s4 +; SI-NEXT: s_or_b32 s12, s6, s5 +; SI-NEXT: s_or_b32 s6, s4, s12 ; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s42, s25, 8 -; SI-NEXT: v_or_b32_e32 v9, v9, v2 +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s26, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s27, 24 +; SI-NEXT: s_or_b32 s14, s7, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: v_and_b32_e32 v10, 0xff, v33 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: v_or_b32_e32 v10, v10, v36 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v12 +; SI-NEXT: s_or_b32 s13, s5, s7 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_or_b32 s4, s4, s42 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v11, v0, v10 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_or_b32_e32 v10, v9, v11 +; SI-NEXT: s_lshr_b64 s[8:9], s[12:13], 16 +; SI-NEXT: 
v_or_b32_e32 v13, v35, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v20 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v8 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v16 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s9, s29, 8 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_and_b32_e32 v25, 0xff, v24 +; SI-NEXT: s_or_b32 s5, s5, s9 +; SI-NEXT: s_and_b32 s9, s42, 0xff +; SI-NEXT: v_or_b32_e32 v9, v9, v34 +; SI-NEXT: v_or_b32_e32 v17, v37, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v19, v0, v14 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v32 +; SI-NEXT: v_or_b32_e32 v39, v5, v18 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s43, 24 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v9 ; SI-NEXT: v_and_b32_e32 v9, 0xff, v4 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v14 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v16 -; SI-NEXT: s_or_b32 s15, s4, s15 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s42, s8, 8 +; SI-NEXT: v_or_b32_e32 v10, v10, v3 +; SI-NEXT: v_or_b32_e32 v14, v14, v7 +; SI-NEXT: v_or_b32_e32 v18, v18, v23 +; SI-NEXT: v_or_b32_e32 v48, v21, v25 +; SI-NEXT: v_and_b32_e32 v25, 0xff, v31 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s12, s10, s9 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v13, v13, v27 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: s_or_b32 s4, s4, s42 -; SI-NEXT: v_or_b32_e32 v15, v3, v9 -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: v_or_b32_e32 v19, v7, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v25, v13, v19 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v12 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v22 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v24 -; SI-NEXT: v_or_b32_e32 v23, s4, v15 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: s_lshl_b32 s42, s12, 8 -; 
SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v17, v17, v30 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: s_or_b32 s4, s4, s42 -; SI-NEXT: v_or_b32_e32 v21, v28, v13 -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 -; SI-NEXT: v_or_b32_e32 v32, v29, v18 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v18, v17, v32 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v20 -; SI-NEXT: v_or_b32_e32 v26, s4, v21 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s42, s14, 8 -; SI-NEXT: s_and_b32 s10, s10, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: s_or_b32 s4, s4, s42 -; SI-NEXT: s_or_b32 s10, s10, s43 -; SI-NEXT: v_or_b32_e32 v33, v31, v17 +; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_or_b32_e32 v25, v25, v38 +; SI-NEXT: s_or_b32 s15, s5, s12 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_alignbit_b32 v1, s11, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, s10, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v15, 16 -; SI-NEXT: v_alignbit_b32 v13, v25, v21, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v33, 16 -; SI-NEXT: v_or_b32_e32 v21, s4, v33 -; SI-NEXT: s_lshr_b32 s42, s5, 16 -; SI-NEXT: s_lshr_b32 s43, s43, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v32 +; SI-NEXT: v_or_b32_e32 v9, v1, v9 +; SI-NEXT: v_or_b32_e32 v10, v10, v19 +; SI-NEXT: v_or_b32_e32 v14, v14, v39 +; SI-NEXT: v_or_b32_e32 v18, v18, v48 +; SI-NEXT: v_and_b32_e32 v29, 0xffff, v25 +; SI-NEXT: s_lshr_b64 s[10:11], s[14:15], 16 +; SI-NEXT: s_or_b32 s4, s4, s14 +; SI-NEXT: v_or_b32_e32 v25, v11, v9 +; SI-NEXT: v_mov_b32_e32 v26, v10 +; SI-NEXT: v_lshr_b64 v[9:10], v[9:10], 16 +; SI-NEXT: v_or_b32_e32 v27, v15, v13 +; SI-NEXT: v_mov_b32_e32 v28, v14 +; SI-NEXT: v_lshr_b64 v[13:14], v[13:14], 16 +; SI-NEXT: v_or_b32_e32 v29, v29, v17 +; SI-NEXT: v_mov_b32_e32 v30, v18 +; SI-NEXT: v_lshr_b64 
v[17:18], v[17:18], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s12, 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v48 +; SI-NEXT: s_mov_b32 s7, s13 +; SI-NEXT: s_mov_b32 s5, s15 ; SI-NEXT: s_cbranch_execnz .LBB51_3 ; SI-NEXT: .LBB51_2: ; %cmp.true -; SI-NEXT: s_add_i32 s40, s40, 3 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s5, s14, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v31, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v24 -; SI-NEXT: v_or_b32_e32 v1, v30, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v29, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: s_lshl_b32 s5, s12, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v12 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v28, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v14 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v16 -; SI-NEXT: v_or_b32_e32 v1, v27, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: 
v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s5, s8, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v4 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v31 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v20 +; SI-NEXT: v_or_b32_e32 v9, v38, v9 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x300, v9 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: v_or_b32_e32 v10, v37, v10 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v22 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v24 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 +; SI-NEXT: v_or_b32_e32 v9, v23, v9 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 ; SI-NEXT: s_and_b32 s4, s24, 0xff ; SI-NEXT: s_lshl_b32 s5, s25, 8 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x300, v9 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s26, 0xff +; SI-NEXT: s_and_b32 s6, s26, 0xff +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: v_or_b32_e32 v10, v21, v10 ; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: s_lshl_b32 s5, s27, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s15, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: 
s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s7, 24 ; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: v_add_i32_e32 v30, vcc, 0x3000000, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v33 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s6, s29, 8 +; SI-NEXT: s_add_i32 s42, s42, 3 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v12 +; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s7, s42, 0xff +; SI-NEXT: v_or_b32_e32 v9, v36, v9 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s43, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: v_add_i32_e32 v9, vcc, 0x300, v9 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_or_b32_e32 v2, v34, v2 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s10, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_lshl_b32 s5, s17, 8 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: v_or_b32_e32 v10, v35, v10 +; SI-NEXT: v_add_i32_e32 v2, vcc, 0x300, v2 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s16, 0xff +; SI-NEXT: s_lshl_b32 s7, s17, 8 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s18, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: v_add_i32_e32 v23, vcc, 0x3000000, v1 +; SI-NEXT: v_or_b32_e32 v9, v10, v9 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v1, v1, v4 +; SI-NEXT: 
s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s8, s18, 0xff +; SI-NEXT: v_add_i32_e32 v27, vcc, 0x3000000, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_lshl_b32 s7, s19, 24 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v1 ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v6 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s8 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 +; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v16 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v8 -; SI-NEXT: s_add_i32 s41, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s8, s21, 8 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s22, 0xff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s9, s22, 0xff +; SI-NEXT: v_add_i32_e32 v7, vcc, 0x300, v7 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_addk_i32 s7, 0x300 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: v_or_b32_e32 v5, v5, v9 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: v_or_b32_e32 v0, v0, v2 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 
s8, s8, s9 +; SI-NEXT: v_or_b32_e32 v5, v5, v7 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: s_add_i32 s11, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s41 -; SI-NEXT: v_alignbit_b32 v1, s11, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s15 -; SI-NEXT: v_alignbit_b32 v5, s10, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v23, 16 -; SI-NEXT: v_alignbit_b32 v13, v25, v26, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v21, 16 -; SI-NEXT: s_lshr_b32 s42, s11, 16 -; SI-NEXT: s_lshr_b32 s43, s10, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v28, vcc, 0x3000000, v5 +; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v0 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_add_i32 s6, s6, 0x3000000 +; SI-NEXT: s_add_i32 s7, s7, 0x3000000 +; SI-NEXT: s_lshr_b64 s[8:9], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[27:28], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[29:30], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s5, 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v30 ; SI-NEXT: .LBB51_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s41 -; SI-NEXT: v_mov_b32_e32 v2, s11 -; SI-NEXT: v_mov_b32_e32 v3, s42 -; SI-NEXT: v_mov_b32_e32 v4, s15 -; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, v23 -; SI-NEXT: v_mov_b32_e32 v12, v26 -; SI-NEXT: v_mov_b32_e32 v14, v25 -; SI-NEXT: v_mov_b32_e32 v16, v21 +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: 
v_mov_b32_e32 v6, s5 +; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v8, v25 +; SI-NEXT: v_mov_b32_e32 v10, v26 +; SI-NEXT: v_mov_b32_e32 v12, v27 +; SI-NEXT: v_mov_b32_e32 v14, v28 +; SI-NEXT: v_mov_b32_e32 v16, v29 +; SI-NEXT: v_mov_b32_e32 v18, v30 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB51_4: -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: s_branch .LBB51_2 ; @@ -20574,78 +20603,78 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB55_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v20, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v21, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v22, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v23, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v24, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 
s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s40, s25, 16 +; SI-NEXT: s_lshr_b32 s29, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s27, s19, 16 +; SI-NEXT: s_lshr_b32 s26, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB55_4 ; SI-NEXT: .LBB55_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[4:5], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[8:9], s[20:21], 1.0 -; SI-NEXT: v_add_f64 v[16:17], s[24:25], 1.0 -; SI-NEXT: v_add_f64 v[12:13], s[22:23], 1.0 -; SI-NEXT: v_alignbit_b32 v20, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v21, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v22, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v23, v5, v4, 16 -; SI-NEXT: v_alignbit_b32 v24, v1, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_add_f64 v[20:21], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[22:23], s[22:23], 1.0 +; SI-NEXT: v_add_f64 v[24:25], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[26:27], s[18:19], 1.0 +; SI-NEXT: v_add_f64 v[28:29], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[17:18], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[22:23], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[24:25], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[26:27], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[28:29], 16 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v29 ; SI-NEXT: s_branch .LBB55_5 ; SI-NEXT: .LBB55_3: -; SI-NEXT: ; implicit-def: $vgpr24 
-; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr29 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB55_2 ; SI-NEXT: .LBB55_4: -; SI-NEXT: v_mov_b32_e32 v1, s17 -; SI-NEXT: v_mov_b32_e32 v5, s19 -; SI-NEXT: v_mov_b32_e32 v9, s21 -; SI-NEXT: v_mov_b32_e32 v13, s23 -; SI-NEXT: v_mov_b32_e32 v17, s25 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 +; SI-NEXT: v_mov_b32_e32 v21, s25 +; SI-NEXT: v_mov_b32_e32 v23, s23 +; SI-NEXT: v_mov_b32_e32 v25, s21 +; SI-NEXT: v_mov_b32_e32 v27, s19 +; SI-NEXT: v_mov_b32_e32 v29, s17 +; SI-NEXT: v_mov_b32_e32 v28, s16 +; SI-NEXT: v_mov_b32_e32 v26, s18 +; SI-NEXT: v_mov_b32_e32 v24, s20 +; SI-NEXT: v_mov_b32_e32 v22, s22 +; SI-NEXT: v_mov_b32_e32 v20, s24 +; SI-NEXT: v_mov_b32_e32 v19, s40 +; SI-NEXT: v_mov_b32_e32 v15, s29 +; SI-NEXT: v_mov_b32_e32 v11, s28 +; SI-NEXT: v_mov_b32_e32 v7, s27 +; SI-NEXT: v_mov_b32_e32 v3, s26 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v9, s8 +; SI-NEXT: v_mov_b32_e32 v13, s6 +; SI-NEXT: v_mov_b32_e32 v17, s4 ; SI-NEXT: .LBB55_5: ; %end -; SI-NEXT: v_mov_b32_e32 v2, v1 -; SI-NEXT: v_mov_b32_e32 v6, v5 -; 
SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v14, v13 -; SI-NEXT: v_mov_b32_e32 v18, v17 -; SI-NEXT: v_mov_b32_e32 v1, v24 -; SI-NEXT: v_mov_b32_e32 v5, v23 -; SI-NEXT: v_mov_b32_e32 v9, v22 -; SI-NEXT: v_mov_b32_e32 v13, v21 -; SI-NEXT: v_mov_b32_e32 v17, v20 +; SI-NEXT: v_mov_b32_e32 v0, v28 +; SI-NEXT: v_mov_b32_e32 v2, v29 +; SI-NEXT: v_mov_b32_e32 v4, v26 +; SI-NEXT: v_mov_b32_e32 v6, v27 +; SI-NEXT: v_mov_b32_e32 v8, v24 +; SI-NEXT: v_mov_b32_e32 v10, v25 +; SI-NEXT: v_mov_b32_e32 v12, v22 +; SI-NEXT: v_mov_b32_e32 v14, v23 +; SI-NEXT: v_mov_b32_e32 v16, v20 +; SI-NEXT: v_mov_b32_e32 v18, v21 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v5f64_to_v20i16_scalar: @@ -21435,76 +21464,71 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b32 s26, s25, 16 +; SI-NEXT: s_lshr_b32 s27, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s29, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, 
s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 ; SI-NEXT: s_add_u32 s24, s24, 3 ; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s26, s25, 16 +; SI-NEXT: s_lshr_b32 s27, s23, 16 +; SI-NEXT: s_lshr_b32 s28, s21, 16 +; SI-NEXT: s_lshr_b32 s29, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 16 ; SI-NEXT: .LBB59_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s12 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v3, s40 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s10 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s9 +; SI-NEXT: v_mov_b32_e32 v7, s29 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s8 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: 
v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v11, s28 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s6 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s7 +; SI-NEXT: v_mov_b32_e32 v15, s27 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s4 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s6 +; SI-NEXT: v_mov_b32_e32 v19, s26 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr28 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr26 ; SI-NEXT: s_branch .LBB59_2 ; ; VI-LABEL: bitcast_v5i64_to_v20i16_scalar: @@ -22779,357 +22803,375 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32 ; SI-LABEL: bitcast_v20f16_to_v40i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 
4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v16, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v15, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v13, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v39, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v33, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s20 ; SI-NEXT: v_cvt_f16_f32_e32 v9, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v54, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v53, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v18, s24 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v50, s26 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v43, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v17, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s29 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v1 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v47, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v46, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v45, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v44, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, s28 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB61_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v16 -; SI-NEXT: v_or_b32_e32 v28, v15, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v10 -; SI-NEXT: v_or_b32_e32 v24, v12, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v39 -; SI-NEXT: v_or_b32_e32 v14, v33, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v9 -; SI-NEXT: v_or_b32_e32 v13, v20, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v54 -; SI-NEXT: v_or_b32_e32 v7, v53, v3 -; SI-NEXT: 
v_lshlrev_b32_e32 v3, 16, v8 -; SI-NEXT: v_or_b32_e32 v11, v50, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v43 -; SI-NEXT: v_or_b32_e32 v5, v44, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; SI-NEXT: v_or_b32_e32 v6, v41, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 -; SI-NEXT: v_or_b32_e32 v4, v46, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; SI-NEXT: v_or_b32_e32 v3, v45, v3 -; SI-NEXT: v_alignbit_b32 v30, v24, v28, 24 -; SI-NEXT: v_alignbit_b32 v35, v24, v28, 16 -; SI-NEXT: v_alignbit_b32 v37, v24, v28, 8 -; SI-NEXT: v_alignbit_b32 v29, v13, v14, 24 -; SI-NEXT: v_alignbit_b32 v31, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v36, v13, v14, 8 -; SI-NEXT: v_alignbit_b32 v23, v11, v7, 24 -; SI-NEXT: v_alignbit_b32 v26, v11, v7, 16 -; SI-NEXT: v_alignbit_b32 v32, v11, v7, 8 -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 24 -; SI-NEXT: v_alignbit_b32 v21, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 8 -; SI-NEXT: v_alignbit_b32 v17, v3, v4, 24 -; SI-NEXT: v_alignbit_b32 v18, v3, v4, 16 -; SI-NEXT: v_alignbit_b32 v22, v3, v4, 8 -; SI-NEXT: v_lshrrev_b32_e32 v40, 8, v24 -; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v13 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v11 -; SI-NEXT: v_lshrrev_b32_e32 v38, 8, v6 -; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v3 -; SI-NEXT: v_bfe_u32 v42, v10, 8, 8 -; SI-NEXT: v_bfe_u32 v55, v9, 8, 8 -; SI-NEXT: v_bfe_u32 v51, v8, 8, 8 -; SI-NEXT: v_bfe_u32 v48, v2, 8, 8 -; SI-NEXT: v_bfe_u32 v34, v1, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s4, v13 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v12 +; SI-NEXT: s_or_b32 s12, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v10 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v11 +; SI-NEXT: s_or_b32 s13, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v16 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v15 +; SI-NEXT: s_or_b32 s10, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v9 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: s_or_b32 
s11, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v19 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v18 +; SI-NEXT: s_or_b32 s8, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v8 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v17 +; SI-NEXT: s_or_b32 s9, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v21 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v5 +; SI-NEXT: s_or_b32 s6, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v20 +; SI-NEXT: s_or_b32 s7, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v24 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v23 +; SI-NEXT: s_lshr_b64 s[14:15], s[12:13], 24 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_readfirstlane_b32 s15, v22 +; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[20:21], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[18:19], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[22:23], s[10:11], 16 +; SI-NEXT: s_or_b32 s5, s15, s5 +; SI-NEXT: s_lshr_b64 s[26:27], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s23, s13, 8 +; SI-NEXT: s_lshr_b32 s21, s11, 8 +; SI-NEXT: s_lshr_b32 s19, s9, 8 +; SI-NEXT: s_lshr_b32 s17, s7, 8 +; SI-NEXT: s_lshr_b32 s15, s5, 8 +; SI-NEXT: v_bfe_u32 v25, v10, 8, 8 +; SI-NEXT: v_bfe_u32 v7, v9, 8, 8 +; SI-NEXT: v_bfe_u32 v6, v8, 8, 8 +; SI-NEXT: v_bfe_u32 v4, v2, 8, 8 +; SI-NEXT: v_bfe_u32 v3, v1, 8, 8 ; SI-NEXT: s_cbranch_execnz .LBB61_3 ; SI-NEXT: .LBB61_2: ; %cmp.true -; SI-NEXT: 
v_cvt_f32_f16_e32 v3, v47 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_readfirstlane_b32 s4, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v22 +; SI-NEXT: v_readfirstlane_b32 s5, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v21 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v4, v4, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v43 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; SI-NEXT: v_or_b32_e32 v3, v5, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v54 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s6, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v5 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_readfirstlane_b32 s6, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v19 +; SI-NEXT: v_readfirstlane_b32 s7, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v20 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: 
v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v53 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v50 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v33 -; SI-NEXT: v_or_b32_e32 v7, v13, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_or_b32_e32 v11, v11, v14 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_or_b32_e32 v14, v14, v13 -; 
SI-NEXT: v_lshlrev_b32_e32 v13, 16, v9 -; SI-NEXT: v_or_b32_e32 v28, v15, v16 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v10 -; SI-NEXT: v_or_b32_e32 v13, v17, v13 -; SI-NEXT: v_or_b32_e32 v24, v12, v15 -; SI-NEXT: v_alignbit_b32 v30, v24, v28, 24 -; SI-NEXT: v_alignbit_b32 v35, v24, v28, 16 -; SI-NEXT: v_alignbit_b32 v37, v24, v28, 8 -; SI-NEXT: v_alignbit_b32 v29, v13, v14, 24 -; SI-NEXT: v_alignbit_b32 v31, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v36, v13, v14, 8 -; SI-NEXT: v_alignbit_b32 v23, v11, v7, 24 -; SI-NEXT: v_alignbit_b32 v26, v11, v7, 16 -; SI-NEXT: v_alignbit_b32 v32, v11, v7, 8 -; SI-NEXT: v_alignbit_b32 v19, v6, v5, 24 -; SI-NEXT: v_alignbit_b32 v21, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 8 -; SI-NEXT: v_alignbit_b32 v17, v3, v4, 24 -; SI-NEXT: v_alignbit_b32 v18, v3, v4, 16 -; SI-NEXT: v_alignbit_b32 v22, v3, v4, 8 -; SI-NEXT: v_lshrrev_b32_e32 v40, 8, v24 -; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v13 -; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v11 -; SI-NEXT: v_lshrrev_b32_e32 v38, 8, v6 -; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v3 -; SI-NEXT: v_bfe_u32 v42, v10, 8, 8 -; SI-NEXT: v_bfe_u32 v55, v9, 8, 8 -; SI-NEXT: v_bfe_u32 v51, v8, 8, 8 -; SI-NEXT: v_bfe_u32 v48, v2, 8, 8 -; SI-NEXT: v_bfe_u32 v34, v1, 8, 8 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_readfirstlane_b32 s8, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v18 +; SI-NEXT: v_readfirstlane_b32 s7, v2 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_readfirstlane_b32 s8, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v8 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: v_readfirstlane_b32 s9, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v17 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 
v8, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v16 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_readfirstlane_b32 s10, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v15 +; SI-NEXT: v_readfirstlane_b32 s9, v8 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_readfirstlane_b32 s10, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v12 +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_readfirstlane_b32 s11, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v13 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_readfirstlane_b32 s11, v9 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: v_readfirstlane_b32 s12, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v5 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_readfirstlane_b32 s12, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v11 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: v_readfirstlane_b32 s13, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_readfirstlane_b32 s13, v10 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: v_readfirstlane_b32 s14, v3 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: s_lshr_b64 s[14:15], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[20:21], s[12:13], 8 +; SI-NEXT: s_lshr_b64 
s[18:19], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[22:23], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s23, s13, 8 +; SI-NEXT: s_lshr_b32 s21, s11, 8 +; SI-NEXT: s_lshr_b32 s19, s9, 8 +; SI-NEXT: s_lshr_b32 s17, s7, 8 +; SI-NEXT: s_lshr_b32 s15, s5, 8 +; SI-NEXT: v_bfe_u32 v25, v10, 8, 8 +; SI-NEXT: v_bfe_u32 v7, v9, 8, 8 +; SI-NEXT: v_bfe_u32 v6, v8, 8, 8 +; SI-NEXT: v_bfe_u32 v4, v2, 8, 8 +; SI-NEXT: v_bfe_u32 v3, v1, 8, 8 ; SI-NEXT: .LBB61_3: ; %end -; SI-NEXT: v_and_b32_e32 v12, 0xff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v37 -; SI-NEXT: v_or_b32_e32 v12, v12, v15 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v30 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v15, v16, v15 -; SI-NEXT: v_or_b32_e32 v12, v12, v15 -; SI-NEXT: buffer_store_dword v12, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v40 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_or_b32_e32 v12, v12, v15 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v42 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v15, v10 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v12, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v36 -; SI-NEXT: v_or_b32_e32 v10, v10, v12 -; SI-NEXT: v_and_b32_e32 v12, 
0xff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v29 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v12, v14, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v12 -; SI-NEXT: v_add_i32_e32 v12, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v52 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_or_b32_e32 v10, v10, v12 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v55 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v9, v12, v9 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v32 -; SI-NEXT: v_or_b32_e32 v7, v7, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v23 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v7, v9, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s20, s20, 8 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: s_or_b32 s12, s12, s20 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: v_mov_b32_e32 v5, s12 +; SI-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: s_lshl_b32 s13, s23, 8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v49 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 
16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v51 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v8, v9, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v10 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v25 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: v_or_b32_e32 v5, v10, v5 +; SI-NEXT: v_or_b32_e32 v5, s12, v5 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s12, s26, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: s_and_b32 s12, s22, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s18, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_add_i32_e32 v10, vcc, 4, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v5, v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v27 -; SI-NEXT: v_or_b32_e32 v5, v5, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v19 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: v_or_b32_e32 v5, v5, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 +; SI-NEXT: v_add_i32_e32 v5, vcc, 8, v0 +; SI-NEXT: v_mov_b32_e32 v10, s10 +; SI-NEXT: buffer_store_dword v10, v5, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: s_lshl_b32 s11, s21, 8 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v9 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: v_or_b32_e32 v5, v7, v5 +; SI-NEXT: v_or_b32_e32 v5, s10, v5 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s10, s40, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: s_and_b32 s10, s28, 0xff +; 
SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s24, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v7, vcc, 12, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 ; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v38 +; SI-NEXT: v_add_i32_e32 v5, vcc, 16, v0 +; SI-NEXT: v_mov_b32_e32 v7, s8 +; SI-NEXT: buffer_store_dword v7, v5, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s19, 8 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v5, s8, v5 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_lshl_b32 s8, s44, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: s_and_b32 s8, s42, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s46, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v6, vcc, 20, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v6, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: s_lshl_b32 s7, s17, 8 ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v48 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v2, v6, v2 -; SI-NEXT: v_or_b32_e32 v2, v5, v2 -; SI-NEXT: v_add_i32_e32 v5, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v2, v5, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: v_or_b32_e32 v2, v4, v2 +; SI-NEXT: v_or_b32_e32 v2, s6, v2 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s60, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; 
SI-NEXT: s_and_b32 s6, s58, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s56, 24 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v22 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v17 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_or_b32_e32 v2, v2, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 +; SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v25 +; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s15, 8 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v34 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, s4, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword 
v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB61_4: -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr18 +; SI-NEXT: ; implicit-def: $sgpr21 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr19 ; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; 
implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr17 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: s_branch .LBB61_2 ; ; VI-LABEL: bitcast_v20f16_to_v40i8_scalar: @@ -31153,232 +31195,246 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB75_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s24 -; SI-NEXT: v_alignbit_b32 v2, s25, v1, 24 -; SI-NEXT: v_alignbit_b32 v11, s25, v1, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s22 -; SI-NEXT: v_alignbit_b32 v4, s23, v1, 24 -; SI-NEXT: v_alignbit_b32 v13, s23, v1, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s20 -; SI-NEXT: v_alignbit_b32 v6, s21, v1, 24 -; SI-NEXT: v_alignbit_b32 v15, s21, v1, 16 -; SI-NEXT: v_alignbit_b32 v16, s21, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s18 -; SI-NEXT: v_alignbit_b32 v8, s19, v1, 24 -; SI-NEXT: v_alignbit_b32 v10, s19, v1, 16 -; SI-NEXT: v_alignbit_b32 v17, s19, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s16 -; SI-NEXT: v_alignbit_b32 v18, s17, v1, 24 -; SI-NEXT: v_alignbit_b32 v19, s17, v1, 16 -; SI-NEXT: v_alignbit_b32 v20, s17, v1, 8 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: 
s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB75_4 ; SI-NEXT: .LBB75_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[9:10], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[7:8], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[5:6], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[8:9], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[15:16], s[18:19], 1.0 +; SI-NEXT: v_lshr_b64 v[22:23], v[8:9], 8 ; SI-NEXT: v_add_f64 v[1:2], s[24:25], 1.0 +; SI-NEXT: v_lshr_b64 v[23:24], v[15:16], 24 ; SI-NEXT: v_add_f64 v[3:4], s[22:23], 1.0 -; SI-NEXT: v_readfirstlane_b32 s25, v2 +; SI-NEXT: v_add_f64 v[20:21], 
s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[24:25], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[10:11], v[1:2], 8 +; SI-NEXT: v_lshr_b64 v[25:26], v[15:16], 8 +; SI-NEXT: v_lshr_b64 v[11:12], v[3:4], 24 +; SI-NEXT: v_lshr_b64 v[26:27], v[20:21], 24 +; SI-NEXT: v_lshr_b64 v[5:6], v[1:2], 24 +; SI-NEXT: v_lshr_b64 v[12:13], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[8:9], 24 +; SI-NEXT: v_lshr_b64 v[27:28], v[20:21], 16 +; SI-NEXT: v_readfirstlane_b32 s17, v21 +; SI-NEXT: v_readfirstlane_b32 s19, v16 +; SI-NEXT: v_readfirstlane_b32 s21, v9 ; SI-NEXT: v_readfirstlane_b32 s23, v4 -; SI-NEXT: v_readfirstlane_b32 s21, v6 -; SI-NEXT: v_readfirstlane_b32 s19, v8 -; SI-NEXT: v_readfirstlane_b32 s17, v10 -; SI-NEXT: v_alignbit_b32 v2, s25, v1, 24 -; SI-NEXT: v_alignbit_b32 v11, s25, v1, 16 -; SI-NEXT: v_alignbit_b32 v12, s25, v1, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v3, 24 -; SI-NEXT: v_alignbit_b32 v13, s23, v3, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v3, 8 -; SI-NEXT: v_alignbit_b32 v6, s21, v5, 24 -; SI-NEXT: v_alignbit_b32 v15, s21, v5, 16 -; SI-NEXT: v_alignbit_b32 v16, s21, v5, 8 -; SI-NEXT: v_alignbit_b32 v8, s19, v7, 24 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v7, 16 -; SI-NEXT: v_alignbit_b32 v17, s19, v7, 8 -; SI-NEXT: v_alignbit_b32 v18, s17, v9, 24 -; SI-NEXT: v_alignbit_b32 v19, s17, v9, 16 -; SI-NEXT: v_alignbit_b32 v20, s17, v9, 8 +; SI-NEXT: v_readfirstlane_b32 s25, v2 +; SI-NEXT: v_lshr_b64 v[6:7], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[13:14], 
v[3:4], 8 +; SI-NEXT: v_lshr_b64 v[18:19], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[20:21], 8 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 ; SI-NEXT: s_branch .LBB75_5 ; SI-NEXT: .LBB75_3: -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr28 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: ; implicit-def: $sgpr26 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: 
$sgpr11 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: s_branch .LBB75_2 ; SI-NEXT: .LBB75_4: -; SI-NEXT: v_mov_b32_e32 v1, s24 +; SI-NEXT: v_mov_b32_e32 v20, s16 +; SI-NEXT: v_mov_b32_e32 v15, s18 +; SI-NEXT: v_mov_b32_e32 v8, s20 ; SI-NEXT: v_mov_b32_e32 v3, s22 -; SI-NEXT: v_mov_b32_e32 v5, s20 -; SI-NEXT: v_mov_b32_e32 v7, s18 -; SI-NEXT: v_mov_b32_e32 v9, s16 +; SI-NEXT: v_mov_b32_e32 v1, s24 +; SI-NEXT: v_mov_b32_e32 v28, s60 +; SI-NEXT: v_mov_b32_e32 v27, s58 +; SI-NEXT: v_mov_b32_e32 v26, s56 +; SI-NEXT: v_mov_b32_e32 v25, s46 +; SI-NEXT: v_mov_b32_e32 v24, s44 +; SI-NEXT: v_mov_b32_e32 v23, s42 +; SI-NEXT: v_mov_b32_e32 v22, s40 +; SI-NEXT: v_mov_b32_e32 v18, s28 +; SI-NEXT: v_mov_b32_e32 v17, s26 +; SI-NEXT: v_mov_b32_e32 v13, s14 +; SI-NEXT: v_mov_b32_e32 v12, s12 +; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v10, s6 +; SI-NEXT: v_mov_b32_e32 v6, s4 +; SI-NEXT: v_mov_b32_e32 v5, s10 ; SI-NEXT: .LBB75_5: ; %end +; SI-NEXT: v_and_b32_e32 v2, 0xff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v28 ; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s40, 8 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v20 -; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 +; SI-NEXT: s_lshl_b32 s5, s94, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v27 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s29, 0xff -; SI-NEXT: v_or_b32_e32 v9, v9, v20 -; SI-NEXT: v_lshlrev_b32_e32 
v19, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v18 +; SI-NEXT: s_and_b32 s5, s93, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v26 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s28, 24 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: s_lshl_b32 s6, s92, 24 +; SI-NEXT: v_or_b32_e32 v4, v7, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v9, v9, v18 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v18, s4 -; SI-NEXT: buffer_store_dword v18, v9, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v17 +; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v15 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v25 ; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s27, 8 -; SI-NEXT: v_or_b32_e32 v7, v7, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v10 +; SI-NEXT: s_lshl_b32 s5, s91, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v24 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s26, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; SI-NEXT: s_and_b32 s5, s90, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v23 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s15, s15, 24 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 +; SI-NEXT: s_lshl_b32 s6, s89, 24 +; SI-NEXT: v_or_b32_e32 v4, v7, v4 +; SI-NEXT: 
v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s15, s5 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 8, v0 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 8, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v16 +; SI-NEXT: v_add_i32_e32 v2, vcc, 12, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v8 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v22 ; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s14, 8 -; SI-NEXT: v_or_b32_e32 v5, v5, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v15 +; SI-NEXT: s_lshl_b32 s5, s88, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v18 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; SI-NEXT: s_and_b32 s5, s79, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s12, 24 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 +; SI-NEXT: s_lshl_b32 s6, s78, 24 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v4, v7, v4 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 16, v0 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 16, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: 
buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v14 +; SI-NEXT: v_add_i32_e32 v2, vcc, 20, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v13 ; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s11, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v13 +; SI-NEXT: s_lshl_b32 s5, s77, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v12 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s10, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; SI-NEXT: s_and_b32 s5, s76, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v11 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s9, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: s_lshl_b32 s6, s75, 24 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 24, v0 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v0 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; 
SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v12 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v10 ; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s8, 8 -; SI-NEXT: v_or_b32_e32 v1, v1, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v11 +; SI-NEXT: s_lshl_b32 s5, s74, 8 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v6 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; SI-NEXT: s_and_b32 s5, s73, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s6, 24 +; SI-NEXT: s_lshl_b32 s6, s72, 24 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 @@ -35291,241 +35347,239 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s26, 0 ; SI-NEXT: s_cbranch_scc0 .LBB79_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v3, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s25, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s25, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s25, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s21, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s17, v15, 24 -; SI-NEXT: 
v_alignbit_b32 v14, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 8 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB79_3 ; SI-NEXT: .LBB79_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; 
SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 ; SI-NEXT: s_add_u32 s24, s24, 3 ; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: v_mov_b32_e32 v3, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s25, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s25, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s25, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s23, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s21, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 8 -; SI-NEXT: v_alignbit_b32 v10, s19, v12, 24 -; SI-NEXT: v_alignbit_b32 v11, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 8 -; SI-NEXT: v_alignbit_b32 v13, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v14, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 8 -; SI-NEXT: s_lshr_b32 s6, s25, 24 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 8 -; SI-NEXT: s_lshr_b32 s9, s23, 24 -; SI-NEXT: s_lshr_b32 s10, s23, 16 -; SI-NEXT: s_lshr_b32 s11, s23, 8 -; SI-NEXT: s_lshr_b32 s12, s21, 24 -; SI-NEXT: s_lshr_b32 s13, s21, 16 -; SI-NEXT: s_lshr_b32 s14, s21, 8 -; SI-NEXT: s_lshr_b32 s15, s19, 24 -; SI-NEXT: s_lshr_b32 s26, s19, 16 -; SI-NEXT: s_lshr_b32 s27, s19, 8 -; SI-NEXT: s_lshr_b32 s28, s17, 24 -; SI-NEXT: s_lshr_b32 s29, s17, 16 -; SI-NEXT: s_lshr_b32 s40, s17, 8 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s72, s25, 24 +; SI-NEXT: s_lshr_b32 s73, s25, 16 +; SI-NEXT: s_lshr_b32 s74, s25, 8 +; SI-NEXT: s_lshr_b32 
s75, s23, 24 +; SI-NEXT: s_lshr_b32 s76, s23, 16 +; SI-NEXT: s_lshr_b32 s77, s23, 8 +; SI-NEXT: s_lshr_b32 s78, s21, 24 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s21, 8 +; SI-NEXT: s_lshr_b32 s89, s19, 24 +; SI-NEXT: s_lshr_b32 s90, s19, 16 +; SI-NEXT: s_lshr_b32 s91, s19, 8 +; SI-NEXT: s_lshr_b32 s92, s17, 24 +; SI-NEXT: s_lshr_b32 s93, s17, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 8 ; SI-NEXT: .LBB79_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s40, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s28, 24 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s27, 8 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: s_or_b32 s4, s4, s5 -; 
SI-NEXT: s_and_b32 s5, s26, 0xff -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s15, s15, 24 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s15, s5 -; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s5, s60, 8 +; SI-NEXT: s_and_b32 s7, s16, 0xff +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: s_and_b32 s7, s58, 0xff +; SI-NEXT: s_lshl_b32 s9, s56, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: v_mov_b32_e32 v1, s5 +; SI-NEXT: s_and_b32 s5, s17, 0xff +; SI-NEXT: s_lshl_b32 s7, s94, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s93, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s92, 24 +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_lshl_b32 s5, s46, 8 +; SI-NEXT: s_and_b32 s7, s18, 0xff +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: s_and_b32 s7, s44, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s42, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 
s5, s14, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s13, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s12, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s11, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s10, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s9, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s19, 0xff +; SI-NEXT: s_lshl_b32 s7, s91, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s90, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s89, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v4, 
v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s40, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s26, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s21, 0xff +; SI-NEXT: s_lshl_b32 s7, s88, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s79, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s78, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_lshl_b32 s7, s14, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s12, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s10, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s23, 0xff +; SI-NEXT: s_lshl_b32 s7, s77, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s76, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s9, s75, 24 +; SI-NEXT: 
s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: s_and_b32 s5, s24, 0xff +; SI-NEXT: s_lshl_b32 s7, s8, 8 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s4, s4, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 ; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s8, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_lshl_b32 s5, s74, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: s_and_b32 s5, s73, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s6, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s72, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 36, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB79_4: -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: 
$vgpr14 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr29 ; SI-NEXT: ; implicit-def: $sgpr28 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr27 ; SI-NEXT: ; implicit-def: $sgpr26 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB79_2 ; ; VI-LABEL: bitcast_v5i64_to_v40i8_scalar: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll index 
6fc9a35cd9ee6..f335b48ba4ae1 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll @@ -716,83 +716,77 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s27, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s25, 16 +; SI-NEXT: s_lshr_b32 s40, s23, 16 +; SI-NEXT: s_lshr_b32 s41, s21, 16 +; SI-NEXT: s_lshr_b32 s42, s19, 16 +; SI-NEXT: s_lshr_b32 s43, s17, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s19, s19, 
3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 ; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s4, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s25, 16 -; SI-NEXT: s_lshr_b32 s7, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s9, s19, 16 -; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_lshr_b32 s27, s25, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s40, s23, 16 +; SI-NEXT: s_lshr_b32 s41, s21, 16 +; SI-NEXT: s_lshr_b32 s42, s19, 16 +; SI-NEXT: s_lshr_b32 s43, s17, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s14 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v3, s43 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s12 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s9 +; SI-NEXT: v_mov_b32_e32 v7, s42 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s8 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s8 +; SI-NEXT: v_mov_b32_e32 v11, s41 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s6 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s7 +; SI-NEXT: v_mov_b32_e32 v15, s40 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: 
v_mov_b32_e32 v17, s4 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s6 +; SI-NEXT: v_mov_b32_e32 v19, s27 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s10 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr42 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v11i32_to_v22i16_scalar: @@ -2676,78 +2670,89 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a, ; SI-NEXT: s_cmp_lg_u32 s27, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s27, s17, 16 +; SI-NEXT: s_lshr_b32 s43, s25, 16 +; SI-NEXT: s_lshr_b32 s42, s23, 16 +; SI-NEXT: 
s_lshr_b32 s41, s21, 16 +; SI-NEXT: s_lshr_b32 s40, s19, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_4 ; SI-NEXT: .LBB13_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v20, s26, 1.0 -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s25, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s24, 1.0 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_alignbit_b32 v21, s4, v20, 16 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v32, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v31, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v30, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v29, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v28, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v27, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v26, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v25, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v24, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v23, s24, 1.0 +; SI-NEXT: v_lshr_b64 v[17:18], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[27:28], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[29:30], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[31:32], 16 +; SI-NEXT: v_lshr_b64 v[21:22], 
v[20:21], 16 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v32 +; SI-NEXT: s_branch .LBB13_5 ; SI-NEXT: .LBB13_3: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: s_branch .LBB13_2 ; SI-NEXT: .LBB13_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v18, s25 +; SI-NEXT: v_mov_b32_e32 v31, s16 +; SI-NEXT: v_mov_b32_e32 v32, s17 +; SI-NEXT: v_mov_b32_e32 v29, s18 +; SI-NEXT: v_mov_b32_e32 v30, s19 +; SI-NEXT: v_mov_b32_e32 v27, s20 +; SI-NEXT: v_mov_b32_e32 v28, s21 +; SI-NEXT: v_mov_b32_e32 v25, s22 +; SI-NEXT: v_mov_b32_e32 v26, s23 +; SI-NEXT: v_mov_b32_e32 v23, s24 +; SI-NEXT: v_mov_b32_e32 v24, s25 ; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 +; SI-NEXT: v_mov_b32_e32 v3, s27 +; SI-NEXT: v_mov_b32_e32 v7, 
s40 +; SI-NEXT: v_mov_b32_e32 v11, s41 +; SI-NEXT: v_mov_b32_e32 v15, s42 +; SI-NEXT: v_mov_b32_e32 v19, s43 +; SI-NEXT: v_mov_b32_e32 v21, s8 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v5, s12 +; SI-NEXT: v_mov_b32_e32 v9, s10 +; SI-NEXT: v_mov_b32_e32 v13, s6 +; SI-NEXT: v_mov_b32_e32 v17, s4 +; SI-NEXT: .LBB13_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v31 +; SI-NEXT: v_mov_b32_e32 v2, v32 +; SI-NEXT: v_mov_b32_e32 v4, v29 +; SI-NEXT: v_mov_b32_e32 v6, v30 +; SI-NEXT: v_mov_b32_e32 v8, v27 +; SI-NEXT: v_mov_b32_e32 v10, v28 +; SI-NEXT: v_mov_b32_e32 v12, v25 +; SI-NEXT: v_mov_b32_e32 v14, v26 +; SI-NEXT: v_mov_b32_e32 v16, v23 +; SI-NEXT: v_mov_b32_e32 v18, v24 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v11f32_to_v22i16_scalar: @@ -5293,136 +5298,137 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i ; SI-LABEL: bitcast_v22f16_to_v22i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v12, v7 -; SI-NEXT: v_mov_b32_e32 v13, v6 -; SI-NEXT: v_mov_b32_e32 v19, v5 -; SI-NEXT: v_mov_b32_e32 v18, v4 -; SI-NEXT: v_mov_b32_e32 v17, v3 -; SI-NEXT: v_mov_b32_e32 v16, v2 -; SI-NEXT: v_mov_b32_e32 v15, v1 +; SI-NEXT: v_mov_b32_e32 v9, v7 +; SI-NEXT: v_mov_b32_e32 v10, v4 +; SI-NEXT: v_mov_b32_e32 v13, v3 ; SI-NEXT: v_mov_b32_e32 v14, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v16, 
v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v25, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v26, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB23_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB23_3 ; SI-NEXT: .LBB23_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; 
SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v23 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_or_b32_e32 v2, v2, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v24 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_or_b32_e32 v6, v6, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v25 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v11 +; SI-NEXT: v_or_b32_e32 v10, v10, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v26 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_or_b32_e32 v14, v14, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 ; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; 
SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_or_b32_e32 v20, v20, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_or_b32_e32 v18, v18, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 ; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_or_b32_e32 v14, v14, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v11 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshlrev_b32_e32 v9, 
16, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_or_b32_e32 v10, v10, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v22 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_lshr_b64 v[24:25], v[5:6], 16 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v19 +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: v_or_b32_e32 v18, v18, v22 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_lshr_b64 v[26:27], v[13:14], 16 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v21 +; SI-NEXT: v_lshr_b64 v[27:28], v[17:18], 16 +; SI-NEXT: v_or_b32_e32 v20, v20, v22 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 ; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 ; SI-NEXT: .LBB23_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v23 +; SI-NEXT: v_mov_b32_e32 v5, v24 +; SI-NEXT: v_mov_b32_e32 v9, v25 +; SI-NEXT: v_mov_b32_e32 v13, v26 +; SI-NEXT: v_mov_b32_e32 v17, v27 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB23_4: ; SI-NEXT: s_branch .LBB23_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll index c9860dbb7d72c..2cde373ec130c 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll @@ -1824,89 +1824,83 @@ define inreg <24 x i16> 
@bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3 ; SI-NEXT: s_cmp_lg_u32 s28, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s27, 16 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s21, 16 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_lshr_b32 s40, s27, 16 +; SI-NEXT: s_lshr_b32 s41, s25, 16 +; SI-NEXT: s_lshr_b32 s42, s23, 16 +; SI-NEXT: s_lshr_b32 s43, s21, 16 +; SI-NEXT: s_lshr_b32 s44, s19, 16 +; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; 
SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s27, 16 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s21, 16 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s40, s27, 16 +; SI-NEXT: s_lshr_b32 s41, s25, 16 +; SI-NEXT: s_lshr_b32 s42, s23, 16 +; SI-NEXT: s_lshr_b32 s43, s21, 16 +; SI-NEXT: s_lshr_b32 s44, s19, 16 +; SI-NEXT: s_lshr_b32 s45, s17, 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s14 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s11 +; SI-NEXT: v_mov_b32_e32 v3, s45 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s12 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s10 +; SI-NEXT: v_mov_b32_e32 v7, s44 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s10 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s9 +; SI-NEXT: v_mov_b32_e32 v11, s43 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s8 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s8 +; SI-NEXT: v_mov_b32_e32 v15, 
s42 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s6 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s7 +; SI-NEXT: v_mov_b32_e32 v19, s41 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s4 ; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v23, s6 +; SI-NEXT: v_mov_b32_e32 v23, s40 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $sgpr42 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr41 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v12i32_to_v24i16_scalar: @@ -5008,84 +5002,97 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a, ; SI-NEXT: s_cmp_lg_u32 s28, 0 ; SI-NEXT: s_cbranch_scc0 .LBB29_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s11, s27, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; 
SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s44, s25, 16 +; SI-NEXT: s_lshr_b32 s43, s23, 16 +; SI-NEXT: s_lshr_b32 s42, s21, 16 +; SI-NEXT: s_lshr_b32 s41, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_4 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s25, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s24, 1.0 -; SI-NEXT: v_add_f32_e64 v22, s27, 1.0 -; SI-NEXT: v_add_f32_e64 v20, s26, 1.0 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v35, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v34, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v33, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v32, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v31, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v30, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v29, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v28, s22, 
1.0 +; SI-NEXT: v_add_f32_e64 v27, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v26, s24, 1.0 +; SI-NEXT: v_add_f32_e64 v25, s27, 1.0 +; SI-NEXT: v_add_f32_e64 v24, s26, 1.0 +; SI-NEXT: v_lshr_b64 v[21:22], v[24:25], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[26:27], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[28:29], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[30:31], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 +; SI-NEXT: s_branch .LBB29_5 ; SI-NEXT: .LBB29_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: s_branch .LBB29_2 ; SI-NEXT: .LBB29_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; 
SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 -; SI-NEXT: v_mov_b32_e32 v23, s11 +; SI-NEXT: v_mov_b32_e32 v34, s16 +; SI-NEXT: v_mov_b32_e32 v35, s17 +; SI-NEXT: v_mov_b32_e32 v32, s18 +; SI-NEXT: v_mov_b32_e32 v33, s19 +; SI-NEXT: v_mov_b32_e32 v30, s20 +; SI-NEXT: v_mov_b32_e32 v31, s21 +; SI-NEXT: v_mov_b32_e32 v28, s22 +; SI-NEXT: v_mov_b32_e32 v29, s23 +; SI-NEXT: v_mov_b32_e32 v26, s24 +; SI-NEXT: v_mov_b32_e32 v27, s25 +; SI-NEXT: v_mov_b32_e32 v24, s26 +; SI-NEXT: v_mov_b32_e32 v25, s27 +; SI-NEXT: v_mov_b32_e32 v3, s40 +; SI-NEXT: v_mov_b32_e32 v7, s41 +; SI-NEXT: v_mov_b32_e32 v11, s42 +; SI-NEXT: v_mov_b32_e32 v15, s43 +; SI-NEXT: v_mov_b32_e32 v19, s44 +; SI-NEXT: v_mov_b32_e32 v23, s45 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v5, s12 +; SI-NEXT: v_mov_b32_e32 v9, s10 +; SI-NEXT: v_mov_b32_e32 v13, s8 +; SI-NEXT: v_mov_b32_e32 v17, s6 +; SI-NEXT: v_mov_b32_e32 v21, s4 +; SI-NEXT: .LBB29_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v34 +; SI-NEXT: v_mov_b32_e32 v2, v35 +; SI-NEXT: v_mov_b32_e32 v4, v32 +; SI-NEXT: v_mov_b32_e32 v6, v33 +; SI-NEXT: v_mov_b32_e32 v8, v30 +; SI-NEXT: v_mov_b32_e32 v10, v31 +; SI-NEXT: v_mov_b32_e32 v12, v28 +; SI-NEXT: v_mov_b32_e32 v14, v29 +; SI-NEXT: v_mov_b32_e32 v16, v26 +; SI-NEXT: v_mov_b32_e32 v18, v27 +; SI-NEXT: v_mov_b32_e32 v20, v24 +; SI-NEXT: v_mov_b32_e32 v22, v25 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v12f32_to_v24i16_scalar: @@ -7630,91 +7637,91 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i ; SI-NEXT: s_cmp_lg_u32 s28, 0 ; SI-NEXT: s_cbranch_scc0 .LBB41_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v24, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v25, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: 
v_alignbit_b32 v26, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v27, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v28, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v29, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s11, s27, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s44, s25, 16 +; SI-NEXT: s_lshr_b32 s43, s23, 16 +; SI-NEXT: s_lshr_b32 s42, s21, 16 +; SI-NEXT: s_lshr_b32 s41, s19, 16 +; SI-NEXT: s_lshr_b32 s40, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_4 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[4:5], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[8:9], s[20:21], 1.0 -; SI-NEXT: v_add_f64 v[12:13], s[22:23], 1.0 -; SI-NEXT: v_add_f64 v[20:21], s[26:27], 1.0 -; SI-NEXT: v_add_f64 v[16:17], s[24:25], 1.0 -; SI-NEXT: v_alignbit_b32 v24, v21, v20, 16 -; SI-NEXT: v_alignbit_b32 v25, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v26, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v27, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v28, v5, v4, 16 -; SI-NEXT: v_alignbit_b32 v29, v1, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_add_f64 v[24:25], s[26:27], 1.0 +; SI-NEXT: v_add_f64 v[26:27], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[28:29], s[22:23], 1.0 +; SI-NEXT: v_add_f64 v[30:31], 
s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[32:33], s[18:19], 1.0 +; SI-NEXT: v_add_f64 v[34:35], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[21:22], v[24:25], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[26:27], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[28:29], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[30:31], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 ; SI-NEXT: s_branch .LBB41_5 ; SI-NEXT: .LBB41_3: -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr43 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: s_branch .LBB41_2 ; SI-NEXT: .LBB41_4: -; SI-NEXT: v_mov_b32_e32 v1, s17 -; SI-NEXT: v_mov_b32_e32 v5, s19 -; SI-NEXT: v_mov_b32_e32 v9, s21 -; SI-NEXT: v_mov_b32_e32 v13, s23 -; SI-NEXT: v_mov_b32_e32 v17, s25 -; SI-NEXT: v_mov_b32_e32 v21, s27 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 
-; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 -; SI-NEXT: v_mov_b32_e32 v23, s11 +; SI-NEXT: v_mov_b32_e32 v25, s27 +; SI-NEXT: v_mov_b32_e32 v27, s25 +; SI-NEXT: v_mov_b32_e32 v29, s23 +; SI-NEXT: v_mov_b32_e32 v31, s21 +; SI-NEXT: v_mov_b32_e32 v33, s19 +; SI-NEXT: v_mov_b32_e32 v35, s17 +; SI-NEXT: v_mov_b32_e32 v34, s16 +; SI-NEXT: v_mov_b32_e32 v32, s18 +; SI-NEXT: v_mov_b32_e32 v30, s20 +; SI-NEXT: v_mov_b32_e32 v28, s22 +; SI-NEXT: v_mov_b32_e32 v26, s24 +; SI-NEXT: v_mov_b32_e32 v24, s26 +; SI-NEXT: v_mov_b32_e32 v23, s45 +; SI-NEXT: v_mov_b32_e32 v19, s44 +; SI-NEXT: v_mov_b32_e32 v15, s43 +; SI-NEXT: v_mov_b32_e32 v11, s42 +; SI-NEXT: v_mov_b32_e32 v7, s41 +; SI-NEXT: v_mov_b32_e32 v3, s40 +; SI-NEXT: v_mov_b32_e32 v1, s14 +; SI-NEXT: v_mov_b32_e32 v5, s12 +; SI-NEXT: v_mov_b32_e32 v9, s10 +; SI-NEXT: v_mov_b32_e32 v13, s8 +; SI-NEXT: v_mov_b32_e32 v17, s6 +; SI-NEXT: v_mov_b32_e32 v21, s4 ; SI-NEXT: .LBB41_5: ; %end -; SI-NEXT: v_mov_b32_e32 v2, v1 -; SI-NEXT: v_mov_b32_e32 v6, v5 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v14, v13 -; SI-NEXT: v_mov_b32_e32 v18, v17 -; SI-NEXT: v_mov_b32_e32 v22, v21 -; SI-NEXT: v_mov_b32_e32 v1, v29 -; SI-NEXT: v_mov_b32_e32 v5, v28 -; SI-NEXT: v_mov_b32_e32 v9, v27 -; SI-NEXT: v_mov_b32_e32 v13, v26 -; SI-NEXT: v_mov_b32_e32 v17, v25 -; SI-NEXT: v_mov_b32_e32 v21, v24 +; SI-NEXT: v_mov_b32_e32 v0, v34 +; SI-NEXT: v_mov_b32_e32 v2, v35 +; SI-NEXT: v_mov_b32_e32 v4, v32 +; SI-NEXT: v_mov_b32_e32 v6, v33 +; SI-NEXT: v_mov_b32_e32 v8, v30 +; SI-NEXT: v_mov_b32_e32 v10, v31 +; SI-NEXT: v_mov_b32_e32 v12, v28 +; SI-NEXT: v_mov_b32_e32 v14, v29 +; SI-NEXT: v_mov_b32_e32 v16, v26 +; SI-NEXT: v_mov_b32_e32 v18, v27 +; SI-NEXT: v_mov_b32_e32 v20, v24 +; SI-NEXT: v_mov_b32_e32 v22, v25 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v6f64_to_v24i16_scalar: @@ -9690,89 +9697,83 @@ define inreg <24 x i16> 
@bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s28, 0 ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s27, 16 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s21, 16 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_lshr_b32 s40, s27, 16 +; SI-NEXT: s_lshr_b32 s41, s25, 16 +; SI-NEXT: s_lshr_b32 s42, s23, 16 +; SI-NEXT: s_lshr_b32 s43, s21, 16 +; SI-NEXT: s_lshr_b32 s44, s19, 16 +; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 ; SI-NEXT: s_add_u32 s26, s26, 3 ; SI-NEXT: s_addc_u32 s27, s27, 0 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; 
SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s27, 16 -; SI-NEXT: s_lshr_b32 s7, s25, 16 -; SI-NEXT: s_lshr_b32 s8, s23, 16 -; SI-NEXT: s_lshr_b32 s9, s21, 16 -; SI-NEXT: s_lshr_b32 s10, s19, 16 -; SI-NEXT: s_lshr_b32 s11, s17, 16 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s40, s27, 16 +; SI-NEXT: s_lshr_b32 s41, s25, 16 +; SI-NEXT: s_lshr_b32 s42, s23, 16 +; SI-NEXT: s_lshr_b32 s43, s21, 16 +; SI-NEXT: s_lshr_b32 s44, s19, 16 +; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[16:17], 16 ; SI-NEXT: .LBB49_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s14 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s11 +; SI-NEXT: v_mov_b32_e32 v3, s45 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s12 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s10 +; SI-NEXT: v_mov_b32_e32 v7, s44 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s10 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s9 +; SI-NEXT: v_mov_b32_e32 v11, s43 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s8 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s8 +; SI-NEXT: 
v_mov_b32_e32 v15, s42 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s6 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s7 +; SI-NEXT: v_mov_b32_e32 v19, s41 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s4 ; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v23, s6 +; SI-NEXT: v_mov_b32_e32 v23, s40 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $sgpr42 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr41 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v6i64_to_v24i16_scalar: @@ -12464,149 +12465,150 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i ; SI-LABEL: bitcast_v24f16_to_v24i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v11, v9 -; SI-NEXT: v_mov_b32_e32 v12, v8 -; SI-NEXT: v_mov_b32_e32 v13, v7 -; SI-NEXT: v_mov_b32_e32 v20, v6 -; SI-NEXT: v_mov_b32_e32 v19, v5 +; SI-NEXT: v_mov_b32_e32 v13, v8 +; SI-NEXT: v_mov_b32_e32 v17, v7 ; SI-NEXT: v_mov_b32_e32 v18, v4 -; SI-NEXT: v_mov_b32_e32 v17, v3 -; SI-NEXT: v_mov_b32_e32 v16, v2 -; SI-NEXT: v_mov_b32_e32 v15, v1 +; SI-NEXT: v_mov_b32_e32 v19, v3 ; SI-NEXT: v_mov_b32_e32 v14, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: 
v_cvt_f16_f32_e32 v1, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v27, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v25, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v28, s25 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v29, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: 
v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_or_b32_e32 v22, v22, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v19 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_add_f32_e32 v15, 
0x38000000, v15 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_or_b32_e32 v6, v6, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v28 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v11 +; SI-NEXT: v_or_b32_e32 v10, v10, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v29 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v3 +; SI-NEXT: v_or_b32_e32 v14, v14, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v30 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_or_b32_e32 v2, v2, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v25 +; SI-NEXT: v_or_b32_e32 v18, v18, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_or_b32_e32 v18, 
v18, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_or_b32_e32 v14, v14, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v11 +; SI-NEXT: v_lshr_b64 v[27:28], v[1:2], 16 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_or_b32_e32 v10, v10, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v24 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshr_b64 v[28:29], v[9:10], 16 ; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v22, v22, v24 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshr_b64 
v[25:26], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[21:22], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 ; SI-NEXT: v_or_b32_e32 v16, v16, v17 ; SI-NEXT: v_or_b32_e32 v20, v20, v21 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 ; SI-NEXT: .LBB59_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v27 +; SI-NEXT: v_mov_b32_e32 v5, v25 +; SI-NEXT: v_mov_b32_e32 v9, v28 +; SI-NEXT: v_mov_b32_e32 v13, v29 +; SI-NEXT: v_mov_b32_e32 v17, v30 +; SI-NEXT: v_mov_b32_e32 v21, v31 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: ; SI-NEXT: s_branch .LBB59_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll index eaf314d4b65dc..718851f97bade 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll @@ -2004,102 +2004,95 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: v_alignbit_b32 v25, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s29, 16 -; SI-NEXT: s_lshr_b32 s7, s27, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 16 -; 
SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 16 -; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_lshr_b32 s44, s29, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s56, s21, 16 +; SI-NEXT: s_lshr_b32 s57, s19, 16 +; SI-NEXT: s_lshr_b32 s58, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: v_alignbit_b32 v25, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s29, 16 -; SI-NEXT: s_lshr_b32 s7, s27, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s10, 
s21, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 16 -; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s44, s29, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s56, s21, 16 +; SI-NEXT: s_lshr_b32 s57, s19, 16 +; SI-NEXT: s_lshr_b32 s58, s17, 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s40 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v3, s58 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s14 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v7, s57 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s12 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v11, s56 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s10 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v15, s47 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s8 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s8 +; SI-NEXT: v_mov_b32_e32 v19, s46 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s6 ; SI-NEXT: v_mov_b32_e32 v22, 
s27 -; SI-NEXT: v_mov_b32_e32 v23, s7 +; SI-NEXT: v_mov_b32_e32 v23, s45 ; SI-NEXT: v_mov_b32_e32 v24, s28 +; SI-NEXT: v_mov_b32_e32 v25, s4 ; SI-NEXT: v_mov_b32_e32 v26, s29 -; SI-NEXT: v_mov_b32_e32 v27, s6 +; SI-NEXT: v_mov_b32_e32 v27, s44 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr47 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr46 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v14i32_to_v28i16_scalar: @@ -5543,96 +5536,111 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a, ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB29_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: v_alignbit_b32 v25, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: 
s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s11, s27, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s58, s29, 16 +; SI-NEXT: s_lshr_b32 s57, s27, 16 +; SI-NEXT: s_lshr_b32 s56, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: s_lshr_b32 s45, s19, 16 +; SI-NEXT: s_lshr_b32 s44, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_4 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_add_f32_e64 v6, s19, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v10, s21, 1.0 -; SI-NEXT: v_add_f32_e64 v8, s20, 1.0 -; SI-NEXT: v_add_f32_e64 v14, s23, 1.0 -; SI-NEXT: v_add_f32_e64 v12, s22, 1.0 -; SI-NEXT: v_add_f32_e64 v18, s25, 1.0 -; SI-NEXT: v_add_f32_e64 v16, s24, 1.0 -; SI-NEXT: v_add_f32_e64 v22, s27, 1.0 -; SI-NEXT: v_add_f32_e64 v20, s26, 1.0 -; SI-NEXT: v_add_f32_e64 v26, s29, 1.0 -; SI-NEXT: v_add_f32_e64 v24, s28, 1.0 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; 
SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v49, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v48, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v39, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v38, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v37, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v36, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v35, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v34, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v33, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v32, s24, 1.0 +; SI-NEXT: v_add_f32_e64 v31, s27, 1.0 +; SI-NEXT: v_add_f32_e64 v30, s26, 1.0 +; SI-NEXT: v_add_f32_e64 v29, s29, 1.0 +; SI-NEXT: v_add_f32_e64 v28, s28, 1.0 +; SI-NEXT: v_lshr_b64 v[25:26], v[28:29], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[30:31], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[34:35], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 16 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v35 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v49 +; SI-NEXT: s_branch .LBB29_5 ; SI-NEXT: .LBB29_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; 
implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr47 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr58 ; SI-NEXT: s_branch .LBB29_2 ; SI-NEXT: .LBB29_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v24, s28 -; SI-NEXT: v_mov_b32_e32 v26, s29 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 -; SI-NEXT: v_mov_b32_e32 v23, s11 -; SI-NEXT: v_mov_b32_e32 v27, s12 +; SI-NEXT: v_mov_b32_e32 v48, s16 +; SI-NEXT: v_mov_b32_e32 v49, s17 +; SI-NEXT: v_mov_b32_e32 v38, s18 +; SI-NEXT: v_mov_b32_e32 v39, s19 +; SI-NEXT: v_mov_b32_e32 v36, s20 +; SI-NEXT: v_mov_b32_e32 v37, s21 +; SI-NEXT: v_mov_b32_e32 v34, s22 +; SI-NEXT: v_mov_b32_e32 v35, s23 +; SI-NEXT: v_mov_b32_e32 v32, s24 +; SI-NEXT: v_mov_b32_e32 v33, s25 +; SI-NEXT: v_mov_b32_e32 v30, s26 +; SI-NEXT: v_mov_b32_e32 v31, s27 +; SI-NEXT: v_mov_b32_e32 v28, s28 +; SI-NEXT: v_mov_b32_e32 v29, s29 +; SI-NEXT: v_mov_b32_e32 v3, s44 +; SI-NEXT: v_mov_b32_e32 v7, s45 +; SI-NEXT: v_mov_b32_e32 v11, s46 +; SI-NEXT: v_mov_b32_e32 v15, s47 +; SI-NEXT: v_mov_b32_e32 v19, s56 +; SI-NEXT: v_mov_b32_e32 v23, s57 +; SI-NEXT: v_mov_b32_e32 v27, s58 +; SI-NEXT: v_mov_b32_e32 v25, s4 +; SI-NEXT: v_mov_b32_e32 v21, s6 +; SI-NEXT: v_mov_b32_e32 v17, s8 +; SI-NEXT: v_mov_b32_e32 v13, s10 +; SI-NEXT: v_mov_b32_e32 v9, s12 +; SI-NEXT: v_mov_b32_e32 v5, s14 +; SI-NEXT: 
v_mov_b32_e32 v1, s40 +; SI-NEXT: .LBB29_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v48 +; SI-NEXT: v_mov_b32_e32 v2, v49 +; SI-NEXT: v_mov_b32_e32 v4, v38 +; SI-NEXT: v_mov_b32_e32 v6, v39 +; SI-NEXT: v_mov_b32_e32 v8, v36 +; SI-NEXT: v_mov_b32_e32 v10, v37 +; SI-NEXT: v_mov_b32_e32 v12, v34 +; SI-NEXT: v_mov_b32_e32 v14, v35 +; SI-NEXT: v_mov_b32_e32 v16, v32 +; SI-NEXT: v_mov_b32_e32 v18, v33 +; SI-NEXT: v_mov_b32_e32 v20, v30 +; SI-NEXT: v_mov_b32_e32 v22, v31 +; SI-NEXT: v_mov_b32_e32 v24, v28 +; SI-NEXT: v_mov_b32_e32 v26, v29 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v14f32_to_v28i16_scalar: @@ -8475,102 +8483,95 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: v_alignbit_b32 v25, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s29, 16 -; SI-NEXT: s_lshr_b32 s7, s27, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 16 -; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_lshr_b32 s44, s29, 16 +; SI-NEXT: s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s56, s21, 16 +; SI-NEXT: s_lshr_b32 s57, s19, 16 +; SI-NEXT: s_lshr_b32 s58, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 
16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: v_alignbit_b32 v25, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v21, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v17, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v13, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v9, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s29, 16 -; SI-NEXT: s_lshr_b32 s7, s27, 16 -; SI-NEXT: s_lshr_b32 s8, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s10, s21, 16 -; SI-NEXT: s_lshr_b32 s11, s19, 16 -; SI-NEXT: s_lshr_b32 s12, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s44, s29, 16 +; SI-NEXT: 
s_lshr_b32 s45, s27, 16 +; SI-NEXT: s_lshr_b32 s46, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s56, s21, 16 +; SI-NEXT: s_lshr_b32 s57, s19, 16 +; SI-NEXT: s_lshr_b32 s58, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s40 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s12 +; SI-NEXT: v_mov_b32_e32 v3, s58 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s14 ; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v7, s57 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s12 ; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v11, s56 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s10 ; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v15, s9 +; SI-NEXT: v_mov_b32_e32 v15, s47 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s8 ; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v19, s8 +; SI-NEXT: v_mov_b32_e32 v19, s46 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s6 ; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v23, s7 +; SI-NEXT: v_mov_b32_e32 v23, s45 ; SI-NEXT: v_mov_b32_e32 v24, s28 +; SI-NEXT: v_mov_b32_e32 v25, s4 ; SI-NEXT: v_mov_b32_e32 v26, s29 -; SI-NEXT: v_mov_b32_e32 v27, s6 +; SI-NEXT: v_mov_b32_e32 v27, s44 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: 
$sgpr12 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr47 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $sgpr46 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v7i64_to_v28i16_scalar: @@ -10809,104 +10810,104 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB49_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s28 -; SI-NEXT: v_alignbit_b32 v28, s29, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s26 -; SI-NEXT: v_alignbit_b32 v29, s27, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s24 -; SI-NEXT: v_alignbit_b32 v30, s25, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s22 -; SI-NEXT: v_alignbit_b32 v31, s23, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s20 -; SI-NEXT: v_alignbit_b32 v32, s21, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v33, s19, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v34, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s11, s27, 16 -; SI-NEXT: s_lshr_b32 s10, s25, 16 -; SI-NEXT: s_lshr_b32 s9, s23, 16 -; SI-NEXT: s_lshr_b32 s8, s21, 16 -; SI-NEXT: s_lshr_b32 s7, s19, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s58, s29, 16 +; SI-NEXT: s_lshr_b32 s57, s27, 16 +; SI-NEXT: s_lshr_b32 s56, s25, 16 +; SI-NEXT: s_lshr_b32 s47, s23, 16 +; SI-NEXT: s_lshr_b32 s46, s21, 16 +; SI-NEXT: s_lshr_b32 s45, s19, 16 +; SI-NEXT: s_lshr_b32 s44, s17, 16 +; SI-NEXT: 
s_lshr_b64 s[4:5], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_4 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[4:5], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[8:9], s[20:21], 1.0 -; SI-NEXT: v_add_f64 v[12:13], s[22:23], 1.0 -; SI-NEXT: v_add_f64 v[16:17], s[24:25], 1.0 -; SI-NEXT: v_add_f64 v[24:25], s[28:29], 1.0 -; SI-NEXT: v_add_f64 v[20:21], s[26:27], 1.0 -; SI-NEXT: v_alignbit_b32 v28, v25, v24, 16 -; SI-NEXT: v_alignbit_b32 v29, v21, v20, 16 -; SI-NEXT: v_alignbit_b32 v30, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v31, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v32, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v33, v5, v4, 16 -; SI-NEXT: v_alignbit_b32 v34, v1, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_add_f64 v[28:29], s[28:29], 1.0 +; SI-NEXT: v_add_f64 v[30:31], s[26:27], 1.0 +; SI-NEXT: v_add_f64 v[32:33], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[34:35], s[22:23], 1.0 +; SI-NEXT: v_add_f64 v[36:37], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[38:39], s[18:19], 1.0 +; SI-NEXT: v_add_f64 v[48:49], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[25:26], v[28:29], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[30:31], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[34:35], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[48:49], 16 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v29 +; SI-NEXT: 
v_lshrrev_b32_e32 v23, 16, v31 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v35 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v49 ; SI-NEXT: s_branch .LBB49_5 ; SI-NEXT: .LBB49_3: -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr47 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr58 ; SI-NEXT: s_branch .LBB49_2 ; SI-NEXT: .LBB49_4: -; SI-NEXT: v_mov_b32_e32 v5, s19 -; SI-NEXT: v_mov_b32_e32 v9, s21 -; SI-NEXT: v_mov_b32_e32 v13, s23 -; SI-NEXT: v_mov_b32_e32 v17, s25 -; SI-NEXT: v_mov_b32_e32 v21, s27 -; SI-NEXT: v_mov_b32_e32 v25, s29 -; SI-NEXT: v_mov_b32_e32 v24, s28 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v1, s17 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 -; SI-NEXT: v_mov_b32_e32 v7, s7 -; SI-NEXT: v_mov_b32_e32 v11, s8 -; SI-NEXT: v_mov_b32_e32 v15, s9 -; SI-NEXT: v_mov_b32_e32 v19, s10 -; SI-NEXT: v_mov_b32_e32 v23, s11 -; 
SI-NEXT: v_mov_b32_e32 v27, s12 +; SI-NEXT: v_mov_b32_e32 v29, s29 +; SI-NEXT: v_mov_b32_e32 v31, s27 +; SI-NEXT: v_mov_b32_e32 v33, s25 +; SI-NEXT: v_mov_b32_e32 v35, s23 +; SI-NEXT: v_mov_b32_e32 v37, s21 +; SI-NEXT: v_mov_b32_e32 v39, s19 +; SI-NEXT: v_mov_b32_e32 v49, s17 +; SI-NEXT: v_mov_b32_e32 v48, s16 +; SI-NEXT: v_mov_b32_e32 v38, s18 +; SI-NEXT: v_mov_b32_e32 v36, s20 +; SI-NEXT: v_mov_b32_e32 v34, s22 +; SI-NEXT: v_mov_b32_e32 v32, s24 +; SI-NEXT: v_mov_b32_e32 v30, s26 +; SI-NEXT: v_mov_b32_e32 v28, s28 +; SI-NEXT: v_mov_b32_e32 v27, s58 +; SI-NEXT: v_mov_b32_e32 v23, s57 +; SI-NEXT: v_mov_b32_e32 v19, s56 +; SI-NEXT: v_mov_b32_e32 v15, s47 +; SI-NEXT: v_mov_b32_e32 v11, s46 +; SI-NEXT: v_mov_b32_e32 v7, s45 +; SI-NEXT: v_mov_b32_e32 v3, s44 +; SI-NEXT: v_mov_b32_e32 v1, s40 +; SI-NEXT: v_mov_b32_e32 v5, s14 +; SI-NEXT: v_mov_b32_e32 v9, s12 +; SI-NEXT: v_mov_b32_e32 v13, s10 +; SI-NEXT: v_mov_b32_e32 v17, s8 +; SI-NEXT: v_mov_b32_e32 v21, s6 +; SI-NEXT: v_mov_b32_e32 v25, s4 ; SI-NEXT: .LBB49_5: ; %end -; SI-NEXT: v_mov_b32_e32 v2, v1 -; SI-NEXT: v_mov_b32_e32 v6, v5 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v14, v13 -; SI-NEXT: v_mov_b32_e32 v18, v17 -; SI-NEXT: v_mov_b32_e32 v22, v21 -; SI-NEXT: v_mov_b32_e32 v26, v25 -; SI-NEXT: v_mov_b32_e32 v1, v34 -; SI-NEXT: v_mov_b32_e32 v5, v33 -; SI-NEXT: v_mov_b32_e32 v9, v32 -; SI-NEXT: v_mov_b32_e32 v13, v31 -; SI-NEXT: v_mov_b32_e32 v17, v30 -; SI-NEXT: v_mov_b32_e32 v21, v29 -; SI-NEXT: v_mov_b32_e32 v25, v28 +; SI-NEXT: v_mov_b32_e32 v0, v48 +; SI-NEXT: v_mov_b32_e32 v2, v49 +; SI-NEXT: v_mov_b32_e32 v4, v38 +; SI-NEXT: v_mov_b32_e32 v6, v39 +; SI-NEXT: v_mov_b32_e32 v8, v36 +; SI-NEXT: v_mov_b32_e32 v10, v37 +; SI-NEXT: v_mov_b32_e32 v12, v34 +; SI-NEXT: v_mov_b32_e32 v14, v35 +; SI-NEXT: v_mov_b32_e32 v16, v32 +; SI-NEXT: v_mov_b32_e32 v18, v33 +; SI-NEXT: v_mov_b32_e32 v20, v30 +; SI-NEXT: v_mov_b32_e32 v22, v31 +; SI-NEXT: v_mov_b32_e32 v24, v28 +; SI-NEXT: v_mov_b32_e32 v26, v29 
; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v7f64_to_v28i16_scalar: @@ -13866,83 +13867,107 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i ; SI-LABEL: bitcast_v28f16_to_v28i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v19, v5 +; SI-NEXT: v_mov_b32_e32 v17, v12 +; SI-NEXT: v_mov_b32_e32 v21, v11 +; SI-NEXT: v_mov_b32_e32 v22, v8 +; SI-NEXT: v_mov_b32_e32 v25, v7 ; SI-NEXT: v_mov_b32_e32 v18, v4 -; SI-NEXT: v_mov_b32_e32 v17, v3 -; SI-NEXT: v_mov_b32_e32 v16, v2 -; SI-NEXT: v_mov_b32_e32 v15, v1 -; SI-NEXT: v_mov_b32_e32 v20, v0 +; SI-NEXT: v_mov_b32_e32 v26, v3 +; SI-NEXT: v_mov_b32_e32 v28, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v23, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v27, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: 
v_cvt_f16_f32_e32 v37, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v34, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v32, s21 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v29, s25 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v35, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 ; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 ; 
SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_or_b32_e32 v10, v10, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v35 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v3 +; SI-NEXT: v_or_b32_e32 v6, v6, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v29 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v15 +; SI-NEXT: v_or_b32_e32 v18, v18, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v36 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_or_b32_e32 v2, v2, v5 +; SI-NEXT: 
v_cvt_f32_f16_e32 v5, v32 +; SI-NEXT: v_or_b32_e32 v14, v14, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v30 +; SI-NEXT: v_or_b32_e32 v22, v22, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 @@ -13950,48 +13975,30 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i ; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_or_b32_e32 v26, v26, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_or_b32_e32 v22, v22, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_or_b32_e32 v18, v18, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v15 -; 
SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_or_b32_e32 v14, v14, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 @@ -13999,18 +14006,20 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_or_b32_e32 v10, v10, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v28 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshr_b64 v[34:35], v[1:2], 16 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 ; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v27 +; SI-NEXT: v_lshr_b64 v[35:36], v[13:14], 16 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: 
v_lshlrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_or_b32_e32 v26, v26, v28 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; SI-NEXT: v_lshr_b64 v[29:30], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[25:26], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 @@ -14018,14 +14027,14 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i ; SI-NEXT: v_or_b32_e32 v16, v16, v17 ; SI-NEXT: v_or_b32_e32 v20, v20, v21 ; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16 ; SI-NEXT: .LBB59_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v34 +; SI-NEXT: v_mov_b32_e32 v5, v32 +; SI-NEXT: v_mov_b32_e32 v9, v29 +; SI-NEXT: v_mov_b32_e32 v13, v35 +; SI-NEXT: v_mov_b32_e32 v17, v30 +; SI-NEXT: v_mov_b32_e32 v21, v36 +; SI-NEXT: v_mov_b32_e32 v25, v37 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: ; SI-NEXT: s_branch .LBB59_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll index fd190b23dd8ca..acc02472c7161 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll @@ -903,31 +903,32 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: v_mul_f32_e64 v5, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v4, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v3, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v1, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v4, 1.0, s18 ; SI-NEXT: s_cbranch_scc0 
.LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v5 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v4 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v5 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 +; SI-NEXT: v_lshr_b64 v[3:4], v[1:2], 16 +; SI-NEXT: v_alignbit_b32 v0, v5, v0, 16 ; SI-NEXT: .LBB5_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v3 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: s_branch .LBB5_2 ; @@ -1391,26 +1392,27 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v3, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: s_cbranch_scc0 .LBB9_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB9_3 ; SI-NEXT: .LBB9_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v3 ; SI-NEXT: 
v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshr_b64 v[3:4], v[1:2], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 ; SI-NEXT: .LBB9_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v3 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB9_4: ; SI-NEXT: s_branch .LBB9_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll index ee43eff01ab60..e33493c6a760e 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll @@ -2143,96 +2143,113 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v16i32_to_v32i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v3, v2 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_mov_b32_e32 v30, v1 -; SI-NEXT: v_mov_b32_e32 v28, v0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 +; SI-NEXT: v_readfirstlane_b32 s4, v0 +; SI-NEXT: s_and_b64 s[6:7], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_cbranch_scc0 .LBB13_4 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: s_lshr_b32 s56, s5, 16 +; SI-NEXT: s_lshr_b32 s57, s29, 16 +; SI-NEXT: s_lshr_b32 s58, s27, 16 +; SI-NEXT: s_lshr_b32 s59, s25, 16 +; SI-NEXT: s_lshr_b32 s60, s23, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 16 +; SI-NEXT: s_lshr_b32 s62, s19, 16 +; SI-NEXT: s_lshr_b32 s63, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[12:13], 
s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16 +; SI-NEXT: s_cbranch_execnz .LBB13_3 +; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s56, s5, 16 +; SI-NEXT: s_lshr_b32 s57, s29, 16 +; SI-NEXT: s_lshr_b32 s58, s27, 16 +; SI-NEXT: s_lshr_b32 s59, s25, 16 +; SI-NEXT: s_lshr_b32 s60, s23, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 16 +; SI-NEXT: s_lshr_b32 s62, s19, 16 +; SI-NEXT: s_lshr_b32 s63, s17, 16 +; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s44 ; SI-NEXT: v_mov_b32_e32 v2, s17 +; SI-NEXT: v_mov_b32_e32 v3, s63 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s42 ; SI-NEXT: v_mov_b32_e32 v6, s19 +; SI-NEXT: v_mov_b32_e32 v7, s62 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s40 ; SI-NEXT: v_mov_b32_e32 v10, s21 +; SI-NEXT: v_mov_b32_e32 v11, s61 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s14 ; SI-NEXT: v_mov_b32_e32 v14, s23 +; 
SI-NEXT: v_mov_b32_e32 v15, s60 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s12 ; SI-NEXT: v_mov_b32_e32 v18, s25 +; SI-NEXT: v_mov_b32_e32 v19, s59 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s10 ; SI-NEXT: v_mov_b32_e32 v22, s27 +; SI-NEXT: v_mov_b32_e32 v23, s58 ; SI-NEXT: v_mov_b32_e32 v24, s28 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v25, s8 ; SI-NEXT: v_mov_b32_e32 v26, s29 -; SI-NEXT: s_cbranch_scc0 .LBB13_4 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_cbranch_execnz .LBB13_3 -; SI-NEXT: .LBB13_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2 -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 -; SI-NEXT: v_add_i32_e32 v6, vcc, 3, v6 -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 -; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v10 -; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v8 -; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14 -; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v12 -; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18 -; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 -; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22 -; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20 -; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26 -; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24 -; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30 -; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28 -; SI-NEXT: v_alignbit_b32 v29, v30, 
v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: .LBB13_3: ; %end +; SI-NEXT: v_mov_b32_e32 v27, s57 +; SI-NEXT: v_mov_b32_e32 v28, s4 +; SI-NEXT: v_mov_b32_e32 v29, s6 +; SI-NEXT: v_mov_b32_e32 v30, s5 +; SI-NEXT: v_mov_b32_e32 v31, s56 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; 
implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v16i32_to_v32i16_scalar: @@ -9385,386 +9402,449 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32 ; SI-LABEL: bitcast_v16i32_to_v64i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v4, s30, 0 +; SI-NEXT: v_writelane_b32 v4, s31, 1 +; SI-NEXT: v_writelane_b32 v4, s34, 2 +; SI-NEXT: v_writelane_b32 v4, s35, 3 +; SI-NEXT: v_writelane_b32 v4, s36, 4 +; SI-NEXT: v_writelane_b32 v4, s37, 5 +; SI-NEXT: v_writelane_b32 v4, s38, 6 +; SI-NEXT: v_writelane_b32 v4, s39, 7 +; SI-NEXT: v_writelane_b32 v4, s48, 8 +; SI-NEXT: v_writelane_b32 v4, s49, 9 +; SI-NEXT: v_writelane_b32 v4, s50, 10 +; SI-NEXT: v_writelane_b32 v4, s51, 11 +; SI-NEXT: v_writelane_b32 v4, s52, 12 +; SI-NEXT: v_writelane_b32 v4, s53, 13 +; SI-NEXT: v_writelane_b32 v4, s54, 14 +; SI-NEXT: v_writelane_b32 v4, s55, 15 +; SI-NEXT: v_writelane_b32 v4, s64, 16 +; SI-NEXT: v_writelane_b32 v4, s65, 17 +; SI-NEXT: v_writelane_b32 v4, s66, 18 +; SI-NEXT: v_writelane_b32 v4, s67, 19 +; SI-NEXT: v_writelane_b32 v4, s68, 20 +; SI-NEXT: v_writelane_b32 v4, s69, 21 +; SI-NEXT: v_writelane_b32 v4, s70, 22 +; SI-NEXT: v_writelane_b32 v4, s71, 23 +; SI-NEXT: v_writelane_b32 v4, s80, 24 +; SI-NEXT: v_writelane_b32 v4, s81, 25 +; SI-NEXT: v_writelane_b32 v4, s82, 26 +; SI-NEXT: v_writelane_b32 v4, s83, 27 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_readfirstlane_b32 s7, v1 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v2 +; SI-NEXT: v_writelane_b32 v4, s84, 28 +; SI-NEXT: v_readfirstlane_b32 s4, v1 +; SI-NEXT: s_and_b64 s[6:7], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v2 +; SI-NEXT: v_writelane_b32 v4, s85, 29 ; 
SI-NEXT: s_cbranch_scc0 .LBB25_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_alignbit_b32 v7, s27, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v9, 8 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v14, s22 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v21, s18 -; SI-NEXT: v_mov_b32_e32 v22, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s29, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 8 -; SI-NEXT: v_alignbit_b32 v13, s25, v9, 24 -; SI-NEXT: v_alignbit_b32 v15, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 8 -; SI-NEXT: v_alignbit_b32 v11, s23, v14, 24 -; SI-NEXT: v_alignbit_b32 v12, s23, v14, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v14, 8 -; SI-NEXT: v_alignbit_b32 v16, s21, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s21, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s21, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s19, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s19, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s19, v21, 8 -; SI-NEXT: v_alignbit_b32 v23, s17, v22, 24 -; SI-NEXT: v_alignbit_b32 v24, s17, v22, 16 -; SI-NEXT: v_alignbit_b32 v22, s17, v22, 8 -; SI-NEXT: s_lshr_b32 s8, s6, 24 -; SI-NEXT: s_lshr_b32 s9, s6, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; 
SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 +; SI-NEXT: s_lshr_b32 s38, s5, 24 +; SI-NEXT: s_lshr_b32 s39, s5, 16 +; SI-NEXT: s_lshr_b32 s48, s5, 8 +; SI-NEXT: s_lshr_b32 s49, s29, 24 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s29, 8 +; SI-NEXT: s_lshr_b32 s52, s27, 24 +; SI-NEXT: s_lshr_b32 s53, s27, 16 +; SI-NEXT: s_lshr_b32 s54, s27, 8 +; SI-NEXT: s_lshr_b32 s55, s25, 24 +; SI-NEXT: s_lshr_b32 s64, s25, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 8 +; SI-NEXT: s_lshr_b32 s66, s23, 24 +; SI-NEXT: s_lshr_b32 s67, s23, 16 +; SI-NEXT: s_lshr_b32 s68, s23, 8 +; SI-NEXT: s_lshr_b32 s69, s21, 24 +; SI-NEXT: s_lshr_b32 s70, s21, 16 +; SI-NEXT: s_lshr_b32 s71, s21, 8 +; SI-NEXT: s_lshr_b32 s80, s19, 24 +; SI-NEXT: s_lshr_b32 s81, s19, 16 +; SI-NEXT: s_lshr_b32 s82, s19, 8 +; SI-NEXT: s_lshr_b32 s83, s17, 24 +; SI-NEXT: s_lshr_b32 s84, s17, 16 +; SI-NEXT: s_lshr_b32 s85, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 
16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB25_3 ; SI-NEXT: .LBB25_2: ; %cmp.true -; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 ; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 +; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_alignbit_b32 v7, s27, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v9, 8 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v14, s22 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v21, s18 -; SI-NEXT: v_mov_b32_e32 v22, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s29, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 8 -; SI-NEXT: v_alignbit_b32 v13, s25, v9, 24 -; SI-NEXT: v_alignbit_b32 v15, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 8 -; SI-NEXT: v_alignbit_b32 v11, s23, v14, 24 -; SI-NEXT: v_alignbit_b32 v12, s23, v14, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v14, 8 -; SI-NEXT: v_alignbit_b32 v16, s21, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s21, v18, 
16 -; SI-NEXT: v_alignbit_b32 v18, s21, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s19, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s19, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s19, v21, 8 -; SI-NEXT: v_alignbit_b32 v23, s17, v22, 24 -; SI-NEXT: v_alignbit_b32 v24, s17, v22, 16 -; SI-NEXT: v_alignbit_b32 v22, s17, v22, 8 -; SI-NEXT: s_lshr_b32 s8, s6, 24 -; SI-NEXT: s_lshr_b32 s9, s6, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 16 +; SI-NEXT: s_lshr_b32 s38, s5, 24 +; SI-NEXT: s_lshr_b32 s39, s5, 16 +; SI-NEXT: s_lshr_b32 s48, s5, 8 
+; SI-NEXT: s_lshr_b32 s49, s29, 24 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s29, 8 +; SI-NEXT: s_lshr_b32 s52, s27, 24 +; SI-NEXT: s_lshr_b32 s53, s27, 16 +; SI-NEXT: s_lshr_b32 s54, s27, 8 +; SI-NEXT: s_lshr_b32 s55, s25, 24 +; SI-NEXT: s_lshr_b32 s64, s25, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 8 +; SI-NEXT: s_lshr_b32 s66, s23, 24 +; SI-NEXT: s_lshr_b32 s67, s23, 16 +; SI-NEXT: s_lshr_b32 s68, s23, 8 +; SI-NEXT: s_lshr_b32 s69, s21, 24 +; SI-NEXT: s_lshr_b32 s70, s21, 16 +; SI-NEXT: s_lshr_b32 s71, s21, 8 +; SI-NEXT: s_lshr_b32 s80, s19, 24 +; SI-NEXT: s_lshr_b32 s81, s19, 16 +; SI-NEXT: s_lshr_b32 s82, s19, 8 +; SI-NEXT: s_lshr_b32 s83, s17, 24 +; SI-NEXT: s_lshr_b32 s84, s17, 16 +; SI-NEXT: s_lshr_b32 s85, s17, 8 +; SI-NEXT: s_lshr_b64 s[88:89], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 8 ; SI-NEXT: .LBB25_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s63, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s62, 0xff -; SI-NEXT: v_and_b32_e32 v24, 0xff, v24 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s61, 24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: 
v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_or_b32_e32 v21, s4, v21 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s60, 8 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s59, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s58, 24 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v19, v21, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v20, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 -; SI-NEXT: v_or_b32_e32 v18, s4, v18 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s57, 8 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s56, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s47, 24 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v16, v18, v16 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v20, v19, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 -; SI-NEXT: v_or_b32_e32 v14, s4, 
v14 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s46, 8 -; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s45, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s44, 24 -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v12 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: s_lshl_b32 s7, s36, 8 +; SI-NEXT: s_and_b32 s9, s16, 0xff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s9, s34, 0xff +; SI-NEXT: s_lshl_b32 s11, s30, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v1, s7 +; SI-NEXT: s_and_b32 s7, s17, 0xff +; SI-NEXT: s_lshl_b32 s9, s85, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s84, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s83, 24 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_lshl_b32 s7, s94, 8 +; SI-NEXT: s_and_b32 s9, s18, 0xff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s9, s92, 0xff +; SI-NEXT: s_lshl_b32 s11, s90, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: s_and_b32 s7, s19, 0xff +; SI-NEXT: s_lshl_b32 s9, s82, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s81, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s80, 24 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_add_i32_e32 v12, 
vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s43, 8 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xff, v15 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s42, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s9, s78, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s76, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s74, 24 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s21, 0xff +; SI-NEXT: s_lshl_b32 s9, s71, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s70, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s69, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s41, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: 
v_or_b32_e32 v11, v12, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v9, v9, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s9, s88, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s72, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s62, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s23, 0xff +; SI-NEXT: s_lshl_b32 s9, s68, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s67, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s66, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s24, 0xff +; SI-NEXT: s_lshl_b32 s9, s60, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s58, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s56, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s25, 0xff +; SI-NEXT: s_lshl_b32 s9, s65, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s64, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s55, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; 
SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 36, v0 -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: buffer_store_dword v11, v9, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s26, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v10 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s27, 0xff -; SI-NEXT: s_lshl_b32 s5, s40, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s15, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s14, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s13, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s12, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s11, s11, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s26, 0xff +; SI-NEXT: s_lshl_b32 s9, s46, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s44, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s42, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: 
buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s27, 0xff +; SI-NEXT: s_lshl_b32 s9, s54, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s53, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s52, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_lshl_b32 s9, s40, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s14, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s12, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s29, 0xff +; SI-NEXT: s_lshl_b32 s9, s51, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s50, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s49, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s7, s10, 8 +; SI-NEXT: s_or_b32 s4, s4, s7 +; SI-NEXT: s_and_b32 s7, s8, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s11, s5 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; 
SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s6, 0xff -; SI-NEXT: s_lshl_b32 s5, s10, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s48, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: s_and_b32 s5, s39, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s8, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s38, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s85, v4, 29 +; SI-NEXT: v_readlane_b32 s84, v4, 28 +; SI-NEXT: v_readlane_b32 s83, v4, 27 +; SI-NEXT: v_readlane_b32 s82, v4, 26 +; SI-NEXT: v_readlane_b32 s81, v4, 25 +; SI-NEXT: v_readlane_b32 s80, v4, 24 +; SI-NEXT: v_readlane_b32 s71, v4, 23 +; SI-NEXT: v_readlane_b32 s70, v4, 22 +; SI-NEXT: v_readlane_b32 s69, v4, 21 +; SI-NEXT: v_readlane_b32 s68, v4, 20 +; SI-NEXT: 
v_readlane_b32 s67, v4, 19 +; SI-NEXT: v_readlane_b32 s66, v4, 18 +; SI-NEXT: v_readlane_b32 s65, v4, 17 +; SI-NEXT: v_readlane_b32 s64, v4, 16 +; SI-NEXT: v_readlane_b32 s55, v4, 15 +; SI-NEXT: v_readlane_b32 s54, v4, 14 +; SI-NEXT: v_readlane_b32 s53, v4, 13 +; SI-NEXT: v_readlane_b32 s52, v4, 12 +; SI-NEXT: v_readlane_b32 s51, v4, 11 +; SI-NEXT: v_readlane_b32 s50, v4, 10 +; SI-NEXT: v_readlane_b32 s49, v4, 9 +; SI-NEXT: v_readlane_b32 s48, v4, 8 +; SI-NEXT: v_readlane_b32 s39, v4, 7 +; SI-NEXT: v_readlane_b32 s38, v4, 6 +; SI-NEXT: v_readlane_b32 s37, v4, 5 +; SI-NEXT: v_readlane_b32 s36, v4, 4 +; SI-NEXT: v_readlane_b32 s35, v4, 3 +; SI-NEXT: v_readlane_b32 s34, v4, 2 +; SI-NEXT: v_readlane_b32 s31, v4, 1 +; SI-NEXT: v_readlane_b32 s30, v4, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB25_4: -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr85 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr83 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; 
implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $sgpr59 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr15 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; 
implicit-def: $sgpr6 ; SI-NEXT: s_branch .LBB25_2 ; ; VI-LABEL: bitcast_v16i32_to_v64i8_scalar: @@ -16750,78 +16830,93 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a, ; SI-LABEL: bitcast_v16f32_to_v32i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v3, v2 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_mov_b32_e32 v30, v1 -; SI-NEXT: v_mov_b32_e32 v28, v0 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v6, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v10, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v14, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v18, s25 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v22, s27 -; SI-NEXT: v_mov_b32_e32 v24, s28 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 +; SI-NEXT: v_mov_b32_e32 v33, v1 +; SI-NEXT: v_mov_b32_e32 v32, v0 +; SI-NEXT: v_mov_b32_e32 v34, s16 +; SI-NEXT: v_mov_b32_e32 v35, s17 +; SI-NEXT: v_mov_b32_e32 v36, s18 +; SI-NEXT: v_mov_b32_e32 v37, s19 +; SI-NEXT: v_mov_b32_e32 v38, s20 +; SI-NEXT: v_mov_b32_e32 v39, s21 +; SI-NEXT: v_mov_b32_e32 v48, s22 +; SI-NEXT: v_mov_b32_e32 v49, s23 +; SI-NEXT: v_mov_b32_e32 v50, s24 +; SI-NEXT: v_mov_b32_e32 v51, s25 +; SI-NEXT: v_mov_b32_e32 v52, s26 +; SI-NEXT: v_mov_b32_e32 v53, s27 +; SI-NEXT: v_mov_b32_e32 v54, s28 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v26, s29 +; SI-NEXT: v_mov_b32_e32 v55, s29 ; SI-NEXT: s_cbranch_scc0 .LBB37_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; 
SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 +; SI-NEXT: v_lshr_b64 v[29:30], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[54:55], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[52:53], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 ; SI-NEXT: s_cbranch_execnz .LBB37_3 ; SI-NEXT: .LBB37_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v0, 1.0, v0 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 -; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 -; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, 
v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; SI-NEXT: v_add_f32_e32 v35, 1.0, v35 +; SI-NEXT: v_add_f32_e32 v34, 1.0, v34 +; SI-NEXT: v_add_f32_e32 v37, 1.0, v37 +; SI-NEXT: v_add_f32_e32 v36, 1.0, v36 +; SI-NEXT: v_add_f32_e32 v39, 1.0, v39 +; SI-NEXT: v_add_f32_e32 v38, 1.0, v38 +; SI-NEXT: v_add_f32_e32 v49, 1.0, v49 +; SI-NEXT: v_add_f32_e32 v48, 1.0, v48 +; SI-NEXT: v_add_f32_e32 v51, 1.0, v51 +; SI-NEXT: v_add_f32_e32 v50, 1.0, v50 +; SI-NEXT: v_add_f32_e32 v53, 1.0, v53 +; SI-NEXT: v_add_f32_e32 v52, 1.0, v52 +; SI-NEXT: v_add_f32_e32 v55, 1.0, v55 +; SI-NEXT: v_add_f32_e32 v54, 1.0, v54 +; SI-NEXT: v_add_f32_e32 v33, 1.0, v33 +; SI-NEXT: v_add_f32_e32 v32, 1.0, v32 +; SI-NEXT: v_lshr_b64 v[29:30], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[54:55], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[52:53], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 ; SI-NEXT: .LBB37_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v34 +; SI-NEXT: v_mov_b32_e32 v2, v35 +; SI-NEXT: v_mov_b32_e32 v4, v36 +; SI-NEXT: v_mov_b32_e32 v6, v37 +; SI-NEXT: v_mov_b32_e32 v8, v38 +; SI-NEXT: 
v_mov_b32_e32 v10, v39 +; SI-NEXT: v_mov_b32_e32 v12, v48 +; SI-NEXT: v_mov_b32_e32 v14, v49 +; SI-NEXT: v_mov_b32_e32 v16, v50 +; SI-NEXT: v_mov_b32_e32 v18, v51 +; SI-NEXT: v_mov_b32_e32 v20, v52 +; SI-NEXT: v_mov_b32_e32 v22, v53 +; SI-NEXT: v_mov_b32_e32 v24, v54 +; SI-NEXT: v_mov_b32_e32 v26, v55 +; SI-NEXT: v_mov_b32_e32 v28, v32 +; SI-NEXT: v_mov_b32_e32 v30, v33 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB37_4: ; SI-NEXT: ; implicit-def: $vgpr1 @@ -16836,10 +16931,10 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a, ; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: s_branch .LBB37_2 ; ; VI-LABEL: bitcast_v16f32_to_v32i16_scalar: @@ -23954,433 +24049,494 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3 ; SI-LABEL: bitcast_v16f32_to_v64i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v40, s30, 0 +; SI-NEXT: v_writelane_b32 v40, s31, 1 +; SI-NEXT: v_writelane_b32 v40, s34, 2 +; SI-NEXT: v_writelane_b32 v40, s35, 3 +; SI-NEXT: v_writelane_b32 v40, s36, 4 +; SI-NEXT: v_writelane_b32 v40, s37, 5 +; SI-NEXT: v_writelane_b32 v40, s38, 6 +; SI-NEXT: v_writelane_b32 v40, s39, 7 +; SI-NEXT: v_writelane_b32 v40, s48, 8 +; SI-NEXT: v_writelane_b32 v40, s49, 9 +; SI-NEXT: v_writelane_b32 v40, s50, 10 +; SI-NEXT: v_writelane_b32 v40, s51, 11 +; SI-NEXT: v_writelane_b32 v40, s52, 12 +; SI-NEXT: v_writelane_b32 v40, s53, 13 +; SI-NEXT: v_writelane_b32 v40, s54, 14 +; SI-NEXT: v_writelane_b32 
v40, s55, 15 +; SI-NEXT: v_writelane_b32 v40, s64, 16 +; SI-NEXT: v_writelane_b32 v40, s65, 17 +; SI-NEXT: v_writelane_b32 v40, s66, 18 +; SI-NEXT: v_writelane_b32 v40, s67, 19 +; SI-NEXT: v_writelane_b32 v40, s68, 20 +; SI-NEXT: v_writelane_b32 v40, s69, 21 +; SI-NEXT: v_writelane_b32 v40, s70, 22 +; SI-NEXT: v_writelane_b32 v40, s71, 23 +; SI-NEXT: v_writelane_b32 v40, s80, 24 +; SI-NEXT: v_writelane_b32 v40, s81, 25 +; SI-NEXT: v_writelane_b32 v40, s82, 26 +; SI-NEXT: v_writelane_b32 v40, s83, 27 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_mov_b32_e32 v28, s16 -; SI-NEXT: v_mov_b32_e32 v25, s17 -; SI-NEXT: v_mov_b32_e32 v20, s18 -; SI-NEXT: v_mov_b32_e32 v19, s19 +; SI-NEXT: v_writelane_b32 v40, s84, 28 +; SI-NEXT: v_readfirstlane_b32 s36, v1 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s37, v2 +; SI-NEXT: v_writelane_b32 v40, s85, 29 +; SI-NEXT: s_cbranch_scc0 .LBB49_3 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: s_lshr_b32 s82, s37, 24 +; SI-NEXT: s_lshr_b32 s84, s37, 16 +; SI-NEXT: s_lshr_b32 s85, s37, 8 +; SI-NEXT: s_lshr_b32 s71, s29, 24 +; SI-NEXT: s_lshr_b32 s81, s29, 16 +; SI-NEXT: s_lshr_b32 s83, s29, 8 +; SI-NEXT: s_lshr_b32 s68, s27, 24 +; SI-NEXT: s_lshr_b32 s70, s27, 16 +; SI-NEXT: s_lshr_b32 s80, s27, 8 +; SI-NEXT: s_lshr_b32 s65, s25, 24 +; SI-NEXT: s_lshr_b32 s67, s25, 16 +; SI-NEXT: s_lshr_b32 s69, s25, 8 +; SI-NEXT: s_lshr_b32 s54, s23, 24 +; SI-NEXT: s_lshr_b32 s64, s23, 16 +; SI-NEXT: s_lshr_b32 s66, s23, 8 +; SI-NEXT: s_lshr_b32 s51, s21, 24 +; SI-NEXT: s_lshr_b32 s53, s21, 16 +; SI-NEXT: s_lshr_b32 s55, s21, 8 +; SI-NEXT: s_lshr_b32 s48, s19, 24 +; SI-NEXT: s_lshr_b32 s50, s19, 16 +; SI-NEXT: s_lshr_b32 s52, s19, 8 +; SI-NEXT: s_lshr_b32 s38, s17, 24 +; SI-NEXT: s_lshr_b32 s39, s17, 16 +; SI-NEXT: s_lshr_b32 s49, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[36:37], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[36:37], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[36:37], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[28:29], 24 +; 
SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[88:89], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[94:95], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 8 +; SI-NEXT: s_cbranch_execnz .LBB49_4 +; SI-NEXT: .LBB49_2: ; %cmp.true +; SI-NEXT: v_add_f32_e64 v20, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v22, s16, 1.0 +; SI-NEXT: v_add_f32_e64 v16, s19, 1.0 +; SI-NEXT: v_add_f32_e64 v18, s18, 1.0 +; SI-NEXT: v_add_f32_e64 v11, s21, 1.0 +; SI-NEXT: v_add_f32_e64 v15, s20, 1.0 +; SI-NEXT: v_add_f32_e64 v9, s23, 1.0 +; SI-NEXT: v_add_f32_e64 v10, s22, 1.0 +; SI-NEXT: v_add_f32_e64 v7, s25, 1.0 +; SI-NEXT: v_add_f32_e64 v8, s24, 1.0 +; SI-NEXT: v_add_f32_e64 v5, s27, 1.0 +; SI-NEXT: v_add_f32_e64 v6, s26, 1.0 +; SI-NEXT: v_add_f32_e64 v3, s29, 1.0 +; SI-NEXT: v_add_f32_e64 v4, s28, 1.0 +; SI-NEXT: v_add_f32_e64 v1, s37, 1.0 +; SI-NEXT: v_add_f32_e64 v2, s36, 1.0 +; SI-NEXT: v_readfirstlane_b32 s16, v22 +; SI-NEXT: v_readfirstlane_b32 s17, v20 +; SI-NEXT: v_readfirstlane_b32 s18, v18 +; SI-NEXT: v_readfirstlane_b32 s19, v16 +; SI-NEXT: v_readfirstlane_b32 s20, v15 +; SI-NEXT: v_readfirstlane_b32 s21, v11 +; SI-NEXT: v_readfirstlane_b32 s22, v10 +; SI-NEXT: v_readfirstlane_b32 s23, v9 +; SI-NEXT: v_readfirstlane_b32 s24, v8 +; SI-NEXT: 
v_readfirstlane_b32 s25, v7 +; SI-NEXT: v_readfirstlane_b32 s26, v6 +; SI-NEXT: v_readfirstlane_b32 s27, v5 +; SI-NEXT: v_readfirstlane_b32 s14, v4 +; SI-NEXT: v_readfirstlane_b32 s15, v3 +; SI-NEXT: v_readfirstlane_b32 s8, v2 +; SI-NEXT: v_readfirstlane_b32 s9, v1 +; SI-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[72:73], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[88:89], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[94:95], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 8 +; SI-NEXT: v_lshrrev_b32_e32 v12, 24, v1 +; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v1 +; SI-NEXT: v_lshrrev_b32_e32 v17, 24, v3 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v3 +; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v5 +; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v5 +; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v5 +; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v7 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v28, 8, v7 +; SI-NEXT: v_lshrrev_b32_e32 v29, 24, v9 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v9 +; SI-NEXT: 
v_lshrrev_b32_e32 v32, 24, v11 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v11 +; SI-NEXT: v_lshrrev_b32_e32 v35, 24, v16 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v37, 8, v16 +; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v20 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v20 +; SI-NEXT: s_branch .LBB49_5 +; SI-NEXT: .LBB49_3: +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr83 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr85 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: 
$sgpr12 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_branch .LBB49_2 +; SI-NEXT: .LBB49_4: +; SI-NEXT: v_mov_b32_e32 v22, s16 +; SI-NEXT: v_mov_b32_e32 v20, s17 +; SI-NEXT: v_mov_b32_e32 v18, s18 +; SI-NEXT: v_mov_b32_e32 v16, s19 ; SI-NEXT: v_mov_b32_e32 v15, s20 -; SI-NEXT: v_mov_b32_e32 v14, s21 -; SI-NEXT: v_mov_b32_e32 v11, s22 +; SI-NEXT: v_mov_b32_e32 v11, s21 +; SI-NEXT: v_mov_b32_e32 v10, s22 ; SI-NEXT: v_mov_b32_e32 v9, s23 ; SI-NEXT: v_mov_b32_e32 v8, s24 ; SI-NEXT: v_mov_b32_e32 v7, s25 ; SI-NEXT: v_mov_b32_e32 v6, s26 ; SI-NEXT: v_mov_b32_e32 v5, s27 ; SI-NEXT: v_mov_b32_e32 v4, s28 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mov_b32_e32 v3, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: 
buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB49_4 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v10, v2, v1, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v12, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v13, v2, v1, 8 -; SI-NEXT: v_alignbit_b32 v16, v3, v4, 24 -; SI-NEXT: v_alignbit_b32 v17, v3, v4, 16 -; SI-NEXT: v_alignbit_b32 v18, v3, v4, 8 -; SI-NEXT: v_alignbit_b32 v21, v5, v6, 24 -; SI-NEXT: v_alignbit_b32 v22, v5, v6, 16 -; SI-NEXT: v_alignbit_b32 v23, v5, v6, 8 -; SI-NEXT: v_alignbit_b32 v29, v7, v8, 24 -; SI-NEXT: v_alignbit_b32 v30, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v31, v7, v8, 8 -; SI-NEXT: v_alignbit_b32 v35, v9, v11, 24 -; SI-NEXT: v_alignbit_b32 v36, v9, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v9, v11, 8 -; SI-NEXT: v_alignbit_b32 v49, v14, v15, 24 -; SI-NEXT: v_alignbit_b32 v50, v14, v15, 16 -; SI-NEXT: v_alignbit_b32 v52, v14, v15, 8 -; SI-NEXT: v_alignbit_b32 v55, v19, v20, 24 -; SI-NEXT: v_alignbit_b32 v41, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v43, v19, v20, 8 -; SI-NEXT: v_alignbit_b32 v46, v25, v28, 24 -; SI-NEXT: v_alignbit_b32 v56, v25, v28, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v25, v28, 8 -; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v2 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v2 -; SI-NEXT: v_lshrrev_b32_e32 v32, 24, v3 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v3 -; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v3 -; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v5 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v5 -; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v7 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v7 -; SI-NEXT: v_lshrrev_b32_e32 v40, 24, v9 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v44, 
8, v9 -; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v14 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v14 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v19 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v19 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 8, v19 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 24, v25 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v25 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v25 -; SI-NEXT: s_cbranch_execnz .LBB49_3 -; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_alignbit_b32 v10, v2, v1, 24 -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: v_alignbit_b32 v12, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v13, v2, v1, 8 -; SI-NEXT: v_alignbit_b32 v16, v3, v4, 24 -; SI-NEXT: v_alignbit_b32 v17, v3, v4, 16 -; SI-NEXT: v_alignbit_b32 v18, v3, v4, 8 -; SI-NEXT: v_alignbit_b32 v21, v5, v6, 24 -; SI-NEXT: v_alignbit_b32 v22, v5, v6, 16 -; SI-NEXT: v_alignbit_b32 v23, v5, v6, 8 -; SI-NEXT: v_alignbit_b32 v29, v7, v8, 24 -; SI-NEXT: v_alignbit_b32 v30, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v31, v7, v8, 8 -; SI-NEXT: v_alignbit_b32 v35, v9, v11, 24 -; SI-NEXT: v_alignbit_b32 v36, v9, v11, 16 -; SI-NEXT: v_alignbit_b32 v37, v9, 
v11, 8 -; SI-NEXT: v_alignbit_b32 v49, v14, v15, 24 -; SI-NEXT: v_alignbit_b32 v50, v14, v15, 16 -; SI-NEXT: v_alignbit_b32 v52, v14, v15, 8 -; SI-NEXT: v_alignbit_b32 v55, v19, v20, 24 -; SI-NEXT: v_alignbit_b32 v41, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v43, v19, v20, 8 -; SI-NEXT: v_alignbit_b32 v46, v25, v28, 24 -; SI-NEXT: v_alignbit_b32 v56, v25, v28, 16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v58, v25, v28, 8 -; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v2 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v2 -; SI-NEXT: v_lshrrev_b32_e32 v32, 24, v3 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v3 -; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v3 -; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v5 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v5 -; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v7 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v7 -; SI-NEXT: v_lshrrev_b32_e32 v40, 24, v9 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v9 -; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v14 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v14 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v19 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v19 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v61, 8, v19 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v62, 24, v25 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v25 +; SI-NEXT: v_mov_b32_e32 v2, s36 +; SI-NEXT: v_mov_b32_e32 v1, s37 +; SI-NEXT: v_mov_b32_e32 v48, s49 +; SI-NEXT: v_mov_b32_e32 v39, s39 +; SI-NEXT: v_mov_b32_e32 v38, s38 +; SI-NEXT: v_mov_b32_e32 v37, s52 +; SI-NEXT: v_mov_b32_e32 v36, s50 +; SI-NEXT: v_mov_b32_e32 v35, s48 +; SI-NEXT: v_mov_b32_e32 v34, s55 +; SI-NEXT: v_mov_b32_e32 v33, s53 +; SI-NEXT: v_mov_b32_e32 v32, s51 +; SI-NEXT: v_mov_b32_e32 v31, s66 +; SI-NEXT: 
v_mov_b32_e32 v30, s64 +; SI-NEXT: v_mov_b32_e32 v29, s54 +; SI-NEXT: v_mov_b32_e32 v28, s69 +; SI-NEXT: v_mov_b32_e32 v27, s67 +; SI-NEXT: v_mov_b32_e32 v26, s65 +; SI-NEXT: v_mov_b32_e32 v25, s80 +; SI-NEXT: v_mov_b32_e32 v24, s70 +; SI-NEXT: v_mov_b32_e32 v23, s68 +; SI-NEXT: v_mov_b32_e32 v21, s83 +; SI-NEXT: v_mov_b32_e32 v19, s81 +; SI-NEXT: v_mov_b32_e32 v17, s71 +; SI-NEXT: v_mov_b32_e32 v14, s85 +; SI-NEXT: v_mov_b32_e32 v13, s84 +; SI-NEXT: v_mov_b32_e32 v12, s82 +; SI-NEXT: .LBB49_5: ; %end +; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 +; SI-NEXT: s_lshl_b32 s5, s34, 8 +; SI-NEXT: v_or_b32_e32 v22, s5, v22 +; SI-NEXT: s_and_b32 s5, s30, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s94, 24 +; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: v_or_b32_e32 v22, s5, v22 +; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v25 -; SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: v_and_b32_e32 v28, 0xff, v28 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v58, 8, v58 -; SI-NEXT: v_and_b32_e32 v56, 0xff, v56 -; SI-NEXT: v_or_b32_e32 v28, v28, v58 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v56 -; SI-NEXT: v_lshlrev_b32_e32 v46, 24, v46 -; SI-NEXT: v_or_b32_e32 v46, v46, v56 -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; SI-NEXT: v_and_b32_e32 v25, 0xff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 -; SI-NEXT: v_or_b32_e32 v28, v28, v46 -; SI-NEXT: v_or_b32_e32 v10, v25, v10 -; SI-NEXT: v_and_b32_e32 v25, 0xff, v63 -; SI-NEXT: buffer_store_dword v28, v0, s[0:3], 0 offen -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v48 +; SI-NEXT: v_or_b32_e32 v20, v20, v22 +; SI-NEXT: v_and_b32_e32 v22, 0xff, v39 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v18 +; SI-NEXT: s_lshl_b32 s5, s92, 8 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v38, 24, v38 +; SI-NEXT: 
v_or_b32_e32 v18, s5, v18 +; SI-NEXT: s_and_b32 s5, s90, 0xff +; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: v_or_b32_e32 v22, v38, v22 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s88, 24 +; SI-NEXT: v_or_b32_e32 v20, v20, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v18, s5, v18 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v62 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v25, v28, v25 -; SI-NEXT: v_or_b32_e32 v10, v10, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v10, v25, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v43 -; SI-NEXT: v_or_b32_e32 v10, v10, v20 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v41 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v55 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v20, v25, v20 -; SI-NEXT: v_or_b32_e32 v10, v10, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v10, v20, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v18, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v61 -; SI-NEXT: v_or_b32_e32 v10, v10, v19 -; SI-NEXT: v_and_b32_e32 v19, 0xff, v60 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v59 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_or_b32_e32 v10, v10, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v10, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v52 -; SI-NEXT: v_or_b32_e32 
v10, v10, v15 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v50 +; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v37 +; SI-NEXT: v_or_b32_e32 v16, v16, v18 +; SI-NEXT: v_and_b32_e32 v18, 0xff, v36 +; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 +; SI-NEXT: s_lshl_b32 s5, s74, 8 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v35 +; SI-NEXT: v_or_b32_e32 v15, s5, v15 +; SI-NEXT: s_and_b32 s5, s62, 0xff +; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_or_b32_e32 v18, v20, v18 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s60, 24 +; SI-NEXT: v_or_b32_e32 v16, v16, v18 +; SI-NEXT: v_add_i32_e32 v18, vcc, 12, v0 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v16, v18, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v15, s5, v15 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v16, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v34 +; SI-NEXT: v_or_b32_e32 v11, v11, v15 +; SI-NEXT: v_and_b32_e32 v15, 0xff, v33 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: s_lshl_b32 s5, s78, 8 ; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v49 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v15, v19, v15 -; SI-NEXT: v_or_b32_e32 v10, v10, v15 -; SI-NEXT: v_add_i32_e32 v15, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v10, v15, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v57 -; SI-NEXT: v_or_b32_e32 v10, v10, v14 -; SI-NEXT: v_and_b32_e32 v14, 0xff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v45 +; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v32 +; SI-NEXT: v_or_b32_e32 v10, s5, v10 +; SI-NEXT: s_and_b32 s5, s76, 0xff +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: v_or_b32_e32 v15, v16, 
v15 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s72, 24 +; SI-NEXT: v_or_b32_e32 v11, v11, v15 +; SI-NEXT: v_add_i32_e32 v15, vcc, 20, v0 ; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v14, v15, v14 -; SI-NEXT: v_or_b32_e32 v10, v10, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v10, v14, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s5, s7, s5 +; SI-NEXT: buffer_store_dword v11, v15, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v10, s5, v10 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v37 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v35 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 ; SI-NEXT: v_add_i32_e32 v11, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v44 +; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v31 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v42 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v30 +; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 +; SI-NEXT: s_lshl_b32 s5, s58, 8 ; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v40 +; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v29 +; SI-NEXT: v_or_b32_e32 v8, s5, v8 +; SI-NEXT: s_and_b32 s5, s56, 0xff ; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: v_or_b32_e32 v10, v11, v10 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s46, 24 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 28, v0 +; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: s_or_b32 s5, s7, s5 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 +; SI-NEXT: v_or_b32_e32 v8, s5, v8 ; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v31 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v29 -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_add_i32_e32 v9, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v54 +; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v28 ; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v53 +; SI-NEXT: v_and_b32_e32 v8, 0xff, v27 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: s_lshl_b32 s5, s44, 8 ; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v51 +; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v26 +; SI-NEXT: v_or_b32_e32 v6, s5, v6 +; SI-NEXT: s_and_b32 s5, s42, 0xff ; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; SI-NEXT: v_or_b32_e32 v8, v9, v8 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s40, 24 ; SI-NEXT: v_or_b32_e32 v7, v7, v8 ; SI-NEXT: v_add_i32_e32 v8, vcc, 36, v0 +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: s_or_b32 s5, s7, s5 ; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: v_or_b32_e32 v6, s5, v6 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v23 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v21 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v7, v8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 ; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v48 +; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v25 ; SI-NEXT: v_or_b32_e32 v5, v5, 
v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v39 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v24 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_lshl_b32 s5, s14, 8 ; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v38 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v23 +; SI-NEXT: v_or_b32_e32 v4, s5, v4 +; SI-NEXT: s_and_b32 s5, s12, 0xff ; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 ; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s10, 24 ; SI-NEXT: v_or_b32_e32 v5, v5, v6 ; SI-NEXT: v_add_i32_e32 v6, vcc, 44, v0 +; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; SI-NEXT: s_or_b32 s5, s7, s5 ; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: v_or_b32_e32 v4, s5, v4 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v18 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v16 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v34 +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v21 ; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v33 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v19 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: s_lshl_b32 s5, s8, 8 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v32 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v17 +; SI-NEXT: v_or_b32_e32 v2, s5, v2 +; SI-NEXT: s_and_b32 s5, s6, 0xff ; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 ; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s4, s4, 24 ; SI-NEXT: v_or_b32_e32 v3, v3, v4 ; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 +; SI-NEXT: v_and_b32_e32 v2, 
0xffff, v2 +; SI-NEXT: s_or_b32 s4, s4, s5 ; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v2, s4, v2 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v13 -; SI-NEXT: v_or_b32_e32 v1, v1, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v27 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v14 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v26 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v13 ; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v12 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload 
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s85, v40, 29 +; SI-NEXT: v_readlane_b32 s84, v40, 28 +; SI-NEXT: v_readlane_b32 s83, v40, 27 +; SI-NEXT: v_readlane_b32 s82, v40, 26 +; SI-NEXT: v_readlane_b32 s81, v40, 25 +; SI-NEXT: v_readlane_b32 s80, v40, 24 +; SI-NEXT: v_readlane_b32 s71, v40, 23 +; SI-NEXT: v_readlane_b32 s70, v40, 22 +; SI-NEXT: v_readlane_b32 s69, v40, 21 +; SI-NEXT: v_readlane_b32 s68, v40, 20 +; SI-NEXT: v_readlane_b32 s67, v40, 19 +; SI-NEXT: v_readlane_b32 s66, v40, 18 +; SI-NEXT: v_readlane_b32 s65, v40, 17 +; SI-NEXT: v_readlane_b32 s64, v40, 16 +; SI-NEXT: v_readlane_b32 s55, v40, 15 +; SI-NEXT: v_readlane_b32 s54, v40, 14 +; SI-NEXT: v_readlane_b32 s53, v40, 13 +; SI-NEXT: v_readlane_b32 s52, v40, 12 +; SI-NEXT: v_readlane_b32 s51, v40, 11 +; SI-NEXT: v_readlane_b32 s50, v40, 10 +; SI-NEXT: v_readlane_b32 s49, v40, 9 +; SI-NEXT: v_readlane_b32 s48, v40, 8 +; SI-NEXT: v_readlane_b32 s39, v40, 7 +; SI-NEXT: v_readlane_b32 s38, v40, 6 +; SI-NEXT: v_readlane_b32 s37, v40, 5 +; SI-NEXT: v_readlane_b32 s36, v40, 4 +; SI-NEXT: v_readlane_b32 s35, v40, 3 +; SI-NEXT: v_readlane_b32 s34, v40, 2 +; SI-NEXT: v_readlane_b32 s31, v40, 1 +; SI-NEXT: v_readlane_b32 s30, v40, 0 +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; 
SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] -; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr62 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; kill: killed $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: s_branch .LBB49_2 ; ; 
VI-LABEL: bitcast_v16f32_to_v64i8_scalar: ; VI: ; %bb.0: @@ -30813,96 +30969,113 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32 ; SI-LABEL: bitcast_v8i64_to_v32i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v3, v2 -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_mov_b32_e32 v30, v1 -; SI-NEXT: v_mov_b32_e32 v28, v0 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 +; SI-NEXT: v_readfirstlane_b32 s4, v0 +; SI-NEXT: s_and_b64 s[6:7], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_cbranch_scc0 .LBB57_4 +; SI-NEXT: ; %bb.1: ; %cmp.false +; SI-NEXT: s_lshr_b32 s56, s5, 16 +; SI-NEXT: s_lshr_b32 s57, s29, 16 +; SI-NEXT: s_lshr_b32 s58, s27, 16 +; SI-NEXT: s_lshr_b32 s59, s25, 16 +; SI-NEXT: s_lshr_b32 s60, s23, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 16 +; SI-NEXT: s_lshr_b32 s62, s19, 16 +; SI-NEXT: s_lshr_b32 s63, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16 +; SI-NEXT: s_cbranch_execnz .LBB57_3 +; SI-NEXT: .LBB57_2: ; %cmp.true +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s28, s28, 3 +; SI-NEXT: s_addc_u32 s29, s29, 0 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s56, s5, 16 +; SI-NEXT: s_lshr_b32 s57, s29, 16 +; SI-NEXT: 
s_lshr_b32 s58, s27, 16 +; SI-NEXT: s_lshr_b32 s59, s25, 16 +; SI-NEXT: s_lshr_b32 s60, s23, 16 +; SI-NEXT: s_lshr_b32 s61, s21, 16 +; SI-NEXT: s_lshr_b32 s62, s19, 16 +; SI-NEXT: s_lshr_b32 s63, s17, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[16:17], 16 +; SI-NEXT: .LBB57_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s44 ; SI-NEXT: v_mov_b32_e32 v2, s17 +; SI-NEXT: v_mov_b32_e32 v3, s63 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s42 ; SI-NEXT: v_mov_b32_e32 v6, s19 +; SI-NEXT: v_mov_b32_e32 v7, s62 ; SI-NEXT: v_mov_b32_e32 v8, s20 +; SI-NEXT: v_mov_b32_e32 v9, s40 ; SI-NEXT: v_mov_b32_e32 v10, s21 +; SI-NEXT: v_mov_b32_e32 v11, s61 ; SI-NEXT: v_mov_b32_e32 v12, s22 +; SI-NEXT: v_mov_b32_e32 v13, s14 ; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v15, s60 ; SI-NEXT: v_mov_b32_e32 v16, s24 +; SI-NEXT: v_mov_b32_e32 v17, s12 ; SI-NEXT: v_mov_b32_e32 v18, s25 +; SI-NEXT: v_mov_b32_e32 v19, s59 ; SI-NEXT: v_mov_b32_e32 v20, s26 +; SI-NEXT: v_mov_b32_e32 v21, s10 ; SI-NEXT: v_mov_b32_e32 v22, s27 +; SI-NEXT: v_mov_b32_e32 v23, s58 ; SI-NEXT: v_mov_b32_e32 v24, s28 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v25, s8 ; SI-NEXT: v_mov_b32_e32 v26, s29 -; SI-NEXT: s_cbranch_scc0 .LBB57_4 -; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: 
v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_cbranch_execnz .LBB57_3 -; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 -; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4 -; SI-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc -; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v8 -; SI-NEXT: v_addc_u32_e32 v10, vcc, 0, v10, vcc -; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v12 -; SI-NEXT: v_addc_u32_e32 v14, vcc, 0, v14, vcc -; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16 -; SI-NEXT: v_addc_u32_e32 v18, vcc, 0, v18, vcc -; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20 -; SI-NEXT: v_addc_u32_e32 v22, vcc, 0, v22, vcc -; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24 -; SI-NEXT: v_addc_u32_e32 v26, vcc, 0, v26, vcc -; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28 -; SI-NEXT: v_addc_u32_e32 v30, vcc, 0, v30, vcc -; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v24, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v20, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v16, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v8, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v30 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v22 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: .LBB57_3: ; %end +; SI-NEXT: v_mov_b32_e32 v27, s57 +; SI-NEXT: v_mov_b32_e32 v28, s4 +; SI-NEXT: v_mov_b32_e32 v29, s6 +; SI-NEXT: v_mov_b32_e32 v30, s5 +; SI-NEXT: 
v_mov_b32_e32 v31, s56 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr59 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr56 ; SI-NEXT: s_branch .LBB57_2 ; ; VI-LABEL: bitcast_v8i64_to_v32i16_scalar: @@ -38073,386 +38246,449 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in ; SI-LABEL: bitcast_v8i64_to_v64i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v4, s30, 0 +; SI-NEXT: v_writelane_b32 v4, s31, 1 +; SI-NEXT: v_writelane_b32 v4, s34, 2 +; SI-NEXT: v_writelane_b32 v4, s35, 3 +; SI-NEXT: v_writelane_b32 v4, s36, 4 +; SI-NEXT: v_writelane_b32 v4, s37, 5 +; SI-NEXT: v_writelane_b32 v4, s38, 6 +; SI-NEXT: v_writelane_b32 
v4, s39, 7 +; SI-NEXT: v_writelane_b32 v4, s48, 8 +; SI-NEXT: v_writelane_b32 v4, s49, 9 +; SI-NEXT: v_writelane_b32 v4, s50, 10 +; SI-NEXT: v_writelane_b32 v4, s51, 11 +; SI-NEXT: v_writelane_b32 v4, s52, 12 +; SI-NEXT: v_writelane_b32 v4, s53, 13 +; SI-NEXT: v_writelane_b32 v4, s54, 14 +; SI-NEXT: v_writelane_b32 v4, s55, 15 +; SI-NEXT: v_writelane_b32 v4, s64, 16 +; SI-NEXT: v_writelane_b32 v4, s65, 17 +; SI-NEXT: v_writelane_b32 v4, s66, 18 +; SI-NEXT: v_writelane_b32 v4, s67, 19 +; SI-NEXT: v_writelane_b32 v4, s68, 20 +; SI-NEXT: v_writelane_b32 v4, s69, 21 +; SI-NEXT: v_writelane_b32 v4, s70, 22 +; SI-NEXT: v_writelane_b32 v4, s71, 23 +; SI-NEXT: v_writelane_b32 v4, s80, 24 +; SI-NEXT: v_writelane_b32 v4, s81, 25 +; SI-NEXT: v_writelane_b32 v4, s82, 26 +; SI-NEXT: v_writelane_b32 v4, s83, 27 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 -; SI-NEXT: v_readfirstlane_b32 s7, v1 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v2 +; SI-NEXT: v_writelane_b32 v4, s84, 28 +; SI-NEXT: v_readfirstlane_b32 s4, v1 +; SI-NEXT: s_and_b64 s[6:7], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v2 +; SI-NEXT: v_writelane_b32 v4, s85, 29 ; SI-NEXT: s_cbranch_scc0 .LBB69_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v14, s22 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v21, s18 -; SI-NEXT: v_mov_b32_e32 v22, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s29, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s27, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 8 -; SI-NEXT: v_alignbit_b32 v13, s25, v10, 24 -; SI-NEXT: v_alignbit_b32 v15, s25, v10, 16 -; SI-NEXT: 
v_alignbit_b32 v10, s25, v10, 8 -; SI-NEXT: v_alignbit_b32 v11, s23, v14, 24 -; SI-NEXT: v_alignbit_b32 v12, s23, v14, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v14, 8 -; SI-NEXT: v_alignbit_b32 v16, s21, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s21, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s21, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s19, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s19, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s19, v21, 8 -; SI-NEXT: v_alignbit_b32 v23, s17, v22, 24 -; SI-NEXT: v_alignbit_b32 v24, s17, v22, 16 -; SI-NEXT: v_alignbit_b32 v22, s17, v22, 8 -; SI-NEXT: s_lshr_b32 s8, s6, 24 -; SI-NEXT: s_lshr_b32 s9, s6, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 +; SI-NEXT: s_lshr_b32 s38, s5, 24 +; SI-NEXT: s_lshr_b32 s39, s5, 16 +; SI-NEXT: s_lshr_b32 s48, s5, 8 +; SI-NEXT: s_lshr_b32 s49, s29, 24 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s29, 8 +; SI-NEXT: s_lshr_b32 s52, s27, 24 +; SI-NEXT: s_lshr_b32 s53, s27, 16 +; SI-NEXT: s_lshr_b32 s54, s27, 8 +; SI-NEXT: s_lshr_b32 s55, s25, 24 +; SI-NEXT: s_lshr_b32 s64, s25, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 8 +; SI-NEXT: s_lshr_b32 s66, s23, 24 +; SI-NEXT: s_lshr_b32 s67, s23, 16 +; SI-NEXT: s_lshr_b32 s68, s23, 8 +; SI-NEXT: s_lshr_b32 s69, s21, 24 +; SI-NEXT: 
s_lshr_b32 s70, s21, 16 +; SI-NEXT: s_lshr_b32 s71, s21, 8 +; SI-NEXT: s_lshr_b32 s80, s19, 24 +; SI-NEXT: s_lshr_b32 s81, s19, 16 +; SI-NEXT: s_lshr_b32 s82, s19, 8 +; SI-NEXT: s_lshr_b32 s83, s17, 24 +; SI-NEXT: s_lshr_b32 s84, s17, 16 +; SI-NEXT: s_lshr_b32 s85, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB69_3 ; SI-NEXT: .LBB69_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 
s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v3, s7 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v14, s22 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v21, s18 -; SI-NEXT: v_mov_b32_e32 v22, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v3, 24 -; SI-NEXT: v_alignbit_b32 v2, s6, v3, 16 -; SI-NEXT: v_alignbit_b32 v3, s6, v3, 8 -; SI-NEXT: v_alignbit_b32 v4, s29, v6, 24 -; SI-NEXT: v_alignbit_b32 v5, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 8 -; SI-NEXT: v_alignbit_b32 v7, s27, v9, 24 -; SI-NEXT: v_alignbit_b32 v8, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 8 -; SI-NEXT: v_alignbit_b32 v13, s25, v10, 24 -; SI-NEXT: v_alignbit_b32 v15, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 8 -; SI-NEXT: v_alignbit_b32 v11, s23, v14, 24 -; SI-NEXT: v_alignbit_b32 v12, s23, v14, 16 -; SI-NEXT: v_alignbit_b32 v14, s23, v14, 8 -; SI-NEXT: v_alignbit_b32 v16, s21, v18, 24 -; SI-NEXT: v_alignbit_b32 v17, s21, v18, 16 -; SI-NEXT: v_alignbit_b32 v18, s21, v18, 8 -; SI-NEXT: v_alignbit_b32 v19, s19, v21, 24 -; SI-NEXT: v_alignbit_b32 v20, s19, v21, 16 -; SI-NEXT: v_alignbit_b32 v21, s19, v21, 8 -; SI-NEXT: v_alignbit_b32 v23, s17, v22, 24 -; SI-NEXT: v_alignbit_b32 v24, s17, v22, 16 -; SI-NEXT: v_alignbit_b32 v22, s17, v22, 8 -; SI-NEXT: s_lshr_b32 s8, s6, 24 -; SI-NEXT: s_lshr_b32 s9, s6, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, 
s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s38, s5, 24 +; SI-NEXT: s_lshr_b32 s39, s5, 16 +; SI-NEXT: s_lshr_b32 s48, s5, 8 +; SI-NEXT: s_lshr_b32 s49, s29, 24 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s29, 8 +; SI-NEXT: s_lshr_b32 s52, s27, 24 +; SI-NEXT: s_lshr_b32 s53, s27, 16 +; SI-NEXT: s_lshr_b32 s54, s27, 8 +; SI-NEXT: s_lshr_b32 s55, s25, 24 +; SI-NEXT: s_lshr_b32 s64, s25, 16 +; SI-NEXT: s_lshr_b32 s65, s25, 8 +; SI-NEXT: s_lshr_b32 s66, s23, 24 +; SI-NEXT: s_lshr_b32 s67, s23, 16 +; SI-NEXT: s_lshr_b32 s68, s23, 8 +; SI-NEXT: s_lshr_b32 s69, s21, 24 +; SI-NEXT: s_lshr_b32 s70, s21, 16 +; SI-NEXT: s_lshr_b32 s71, s21, 8 +; SI-NEXT: s_lshr_b32 s80, s19, 24 +; SI-NEXT: s_lshr_b32 s81, s19, 16 +; SI-NEXT: s_lshr_b32 s82, s19, 8 +; SI-NEXT: s_lshr_b32 s83, s17, 24 +; SI-NEXT: s_lshr_b32 s84, s17, 16 +; SI-NEXT: s_lshr_b32 s85, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[14:15], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[24:25], 16 +; SI-NEXT: 
s_lshr_b64 s[60:61], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[16:17], 8 ; SI-NEXT: .LBB69_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 -; SI-NEXT: v_or_b32_e32 v22, s4, v22 -; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s5, s63, 8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s62, 0xff -; SI-NEXT: v_and_b32_e32 v24, 0xff, v24 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s61, 24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_mov_b32_e32 v23, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_or_b32_e32 v21, s4, v21 -; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s5, s60, 8 -; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s59, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v19 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s58, 24 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v19, v21, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v20, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v18 -; SI-NEXT: v_or_b32_e32 v18, s4, v18 -; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s5, s57, 8 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s56, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s47, 24 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v16, v18, v16 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v20, v19, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v17, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s5, s46, 8 -; SI-NEXT: v_and_b32_e32 v12, 0xff, v12 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s45, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s44, 24 -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 -; SI-NEXT: v_or_b32_e32 v11, v11, v12 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 +; SI-NEXT: s_lshl_b32 s7, s36, 8 +; SI-NEXT: s_and_b32 s9, s16, 0xff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s9, s34, 0xff +; 
SI-NEXT: s_lshl_b32 s11, s30, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v1, s7 +; SI-NEXT: s_and_b32 s7, s17, 0xff +; SI-NEXT: s_lshl_b32 s9, s85, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s84, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s83, 24 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_lshl_b32 s7, s94, 8 +; SI-NEXT: s_and_b32 s9, s18, 0xff +; SI-NEXT: s_or_b32 s7, s9, s7 +; SI-NEXT: s_and_b32 s9, s92, 0xff +; SI-NEXT: s_lshl_b32 s11, s90, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: s_and_b32 s7, s19, 0xff +; SI-NEXT: s_lshl_b32 s9, s82, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s81, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s80, 24 +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v16, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v11, v14, v11 -; SI-NEXT: v_add_i32_e32 v12, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v17, v16, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s5, s43, 8 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 
offen -; SI-NEXT: v_and_b32_e32 v11, 0xff, v15 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s42, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s9, s76, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s72, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s62, 24 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s21, 0xff +; SI-NEXT: s_lshl_b32 s9, s71, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s70, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s69, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s41, 24 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v11, v12, v11 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s27, 0xff -; SI-NEXT: s_lshl_b32 s5, s40, 8 -; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s15, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; 
SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s14, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s22, 0xff +; SI-NEXT: s_lshl_b32 s9, s88, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s78, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s74, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s23, 0xff +; SI-NEXT: s_lshl_b32 s9, s68, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s67, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s66, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s24, 0xff +; SI-NEXT: s_lshl_b32 s9, s60, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s58, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s56, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s25, 0xff +; SI-NEXT: s_lshl_b32 s9, s65, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s64, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s55, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: 
v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v7, v9, v7 -; SI-NEXT: v_add_i32_e32 v8, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s29, 0xff -; SI-NEXT: s_lshl_b32 s5, s13, 8 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s12, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s11, s11, 24 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s26, 0xff +; SI-NEXT: s_lshl_b32 s9, s46, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s44, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s42, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s27, 0xff +; SI-NEXT: s_lshl_b32 s9, s54, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s53, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s52, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_lshl_b32 
s9, s40, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s14, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s12, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s7, s29, 0xff +; SI-NEXT: s_lshl_b32 s9, s51, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s50, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s11, s49, 24 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s7, s10, 8 +; SI-NEXT: s_or_b32 s4, s4, s7 +; SI-NEXT: s_and_b32 s7, s8, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s6, s6, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s11, s5 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 -; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s6, 0xff -; SI-NEXT: s_lshl_b32 s5, s10, 8 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s48, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; 
SI-NEXT: s_and_b32 s5, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; SI-NEXT: s_and_b32 s5, s39, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s8, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: s_lshl_b32 s6, s38, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s85, v4, 29 +; SI-NEXT: v_readlane_b32 s84, v4, 28 +; SI-NEXT: v_readlane_b32 s83, v4, 27 +; SI-NEXT: v_readlane_b32 s82, v4, 26 +; SI-NEXT: v_readlane_b32 s81, v4, 25 +; SI-NEXT: v_readlane_b32 s80, v4, 24 +; SI-NEXT: v_readlane_b32 s71, v4, 23 +; SI-NEXT: v_readlane_b32 s70, v4, 22 +; SI-NEXT: v_readlane_b32 s69, v4, 21 +; SI-NEXT: v_readlane_b32 s68, v4, 20 +; SI-NEXT: v_readlane_b32 s67, v4, 19 +; SI-NEXT: v_readlane_b32 s66, v4, 18 +; SI-NEXT: v_readlane_b32 s65, v4, 17 +; SI-NEXT: v_readlane_b32 s64, v4, 16 +; SI-NEXT: v_readlane_b32 s55, v4, 15 +; SI-NEXT: v_readlane_b32 s54, v4, 14 +; SI-NEXT: v_readlane_b32 s53, v4, 13 +; SI-NEXT: v_readlane_b32 s52, v4, 12 +; SI-NEXT: v_readlane_b32 s51, v4, 11 +; SI-NEXT: v_readlane_b32 s50, v4, 10 +; SI-NEXT: v_readlane_b32 s49, v4, 9 +; SI-NEXT: v_readlane_b32 s48, v4, 8 +; SI-NEXT: v_readlane_b32 s39, v4, 7 +; SI-NEXT: v_readlane_b32 s38, v4, 6 +; SI-NEXT: v_readlane_b32 s37, v4, 5 +; SI-NEXT: v_readlane_b32 s36, v4, 4 +; SI-NEXT: v_readlane_b32 s35, v4, 3 +; 
SI-NEXT: v_readlane_b32 s34, v4, 2 +; SI-NEXT: v_readlane_b32 s31, v4, 1 +; SI-NEXT: v_readlane_b32 s30, v4, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB69_4: -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr85 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr83 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr82 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; 
implicit-def: $sgpr59 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr57 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr15 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr6 ; SI-NEXT: s_branch .LBB69_2 ; ; VI-LABEL: bitcast_v8i64_to_v64i8_scalar: @@ -44167,100 +44403,100 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2 ; SI-NEXT: v_mov_b32_e32 v33, v1 ; SI-NEXT: v_mov_b32_e32 v32, v0 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v1, s17 -; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v5, s19 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v9, s21 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v13, s23 -; SI-NEXT: v_mov_b32_e32 v16, s24 -; SI-NEXT: v_mov_b32_e32 v17, s25 
-; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v21, s27 -; SI-NEXT: v_mov_b32_e32 v24, s28 +; SI-NEXT: v_mov_b32_e32 v34, s16 +; SI-NEXT: v_mov_b32_e32 v35, s17 +; SI-NEXT: v_mov_b32_e32 v36, s18 +; SI-NEXT: v_mov_b32_e32 v37, s19 +; SI-NEXT: v_mov_b32_e32 v38, s20 +; SI-NEXT: v_mov_b32_e32 v39, s21 +; SI-NEXT: v_mov_b32_e32 v48, s22 +; SI-NEXT: v_mov_b32_e32 v49, s23 +; SI-NEXT: v_mov_b32_e32 v50, s24 +; SI-NEXT: v_mov_b32_e32 v51, s25 +; SI-NEXT: v_mov_b32_e32 v52, s26 +; SI-NEXT: v_mov_b32_e32 v53, s27 +; SI-NEXT: v_mov_b32_e32 v54, s28 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v25, s29 +; SI-NEXT: v_mov_b32_e32 v55, s29 ; SI-NEXT: s_cbranch_scc0 .LBB73_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v29, v33, v32, 16 -; SI-NEXT: v_alignbit_b32 v48, v25, v24, 16 -; SI-NEXT: v_alignbit_b32 v39, v21, v20, 16 -; SI-NEXT: v_alignbit_b32 v38, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v37, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v36, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v35, v5, v4, 16 -; SI-NEXT: v_alignbit_b32 v34, v1, v0, 16 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 +; SI-NEXT: v_lshr_b64 v[29:30], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[54:55], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[52:53], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[38:39], 
16 +; SI-NEXT: v_lshr_b64 v[5:6], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 ; SI-NEXT: s_cbranch_execnz .LBB73_3 ; SI-NEXT: .LBB73_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0 -; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0 -; SI-NEXT: v_add_f64 v[8:9], v[8:9], 1.0 -; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0 -; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0 -; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0 ; SI-NEXT: v_add_f64 v[32:33], v[32:33], 1.0 -; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0 -; SI-NEXT: v_alignbit_b32 v29, v33, v32, 16 -; SI-NEXT: v_alignbit_b32 v48, v25, v24, 16 -; SI-NEXT: v_alignbit_b32 v39, v21, v20, 16 -; SI-NEXT: v_alignbit_b32 v38, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v37, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v36, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v35, v5, v4, 16 -; SI-NEXT: v_alignbit_b32 v34, v1, v0, 16 +; SI-NEXT: v_add_f64 v[54:55], v[54:55], 1.0 +; SI-NEXT: v_add_f64 v[52:53], v[52:53], 1.0 +; SI-NEXT: v_add_f64 v[50:51], v[50:51], 1.0 +; SI-NEXT: v_add_f64 v[48:49], v[48:49], 1.0 +; SI-NEXT: v_add_f64 v[38:39], v[38:39], 1.0 +; SI-NEXT: v_add_f64 v[36:37], v[36:37], 1.0 +; SI-NEXT: v_add_f64 v[34:35], v[34:35], 1.0 +; SI-NEXT: v_lshr_b64 v[29:30], v[32:33], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[54:55], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[52:53], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[13:14], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[38:39], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[36:37], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[34:35], 16 ; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v33 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v25 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v21 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v53 +; SI-NEXT: 
v_lshrrev_b32_e32 v19, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v39 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v37 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v35 ; SI-NEXT: .LBB73_3: ; %end -; SI-NEXT: v_mov_b32_e32 v2, v1 -; SI-NEXT: v_mov_b32_e32 v6, v5 -; SI-NEXT: v_mov_b32_e32 v10, v9 -; SI-NEXT: v_mov_b32_e32 v14, v13 -; SI-NEXT: v_mov_b32_e32 v18, v17 -; SI-NEXT: v_mov_b32_e32 v22, v21 -; SI-NEXT: v_mov_b32_e32 v26, v25 +; SI-NEXT: v_mov_b32_e32 v0, v34 +; SI-NEXT: v_mov_b32_e32 v2, v35 +; SI-NEXT: v_mov_b32_e32 v4, v36 +; SI-NEXT: v_mov_b32_e32 v6, v37 +; SI-NEXT: v_mov_b32_e32 v8, v38 +; SI-NEXT: v_mov_b32_e32 v10, v39 +; SI-NEXT: v_mov_b32_e32 v12, v48 +; SI-NEXT: v_mov_b32_e32 v14, v49 +; SI-NEXT: v_mov_b32_e32 v16, v50 +; SI-NEXT: v_mov_b32_e32 v18, v51 +; SI-NEXT: v_mov_b32_e32 v20, v52 +; SI-NEXT: v_mov_b32_e32 v22, v53 +; SI-NEXT: v_mov_b32_e32 v24, v54 +; SI-NEXT: v_mov_b32_e32 v26, v55 ; SI-NEXT: v_mov_b32_e32 v28, v32 ; SI-NEXT: v_mov_b32_e32 v30, v33 -; SI-NEXT: v_mov_b32_e32 v1, v34 -; SI-NEXT: v_mov_b32_e32 v5, v35 -; SI-NEXT: v_mov_b32_e32 v9, v36 -; SI-NEXT: v_mov_b32_e32 v13, v37 -; SI-NEXT: v_mov_b32_e32 v17, v38 -; SI-NEXT: v_mov_b32_e32 v21, v39 -; SI-NEXT: v_mov_b32_e32 v25, v48 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB73_4: -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr13 ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr17 ; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr48 ; 
SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: s_branch .LBB73_2 ; ; VI-LABEL: bitcast_v8f64_to_v32i16_scalar: @@ -51191,376 +51427,435 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32 ; SI-LABEL: bitcast_v8f64_to_v64i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v40, s30, 0 +; SI-NEXT: v_writelane_b32 v40, s31, 1 +; SI-NEXT: v_writelane_b32 v40, s34, 2 +; SI-NEXT: v_writelane_b32 v40, s35, 3 +; SI-NEXT: v_writelane_b32 v40, s36, 4 +; SI-NEXT: v_writelane_b32 v40, s37, 5 +; SI-NEXT: v_writelane_b32 v40, s38, 6 +; SI-NEXT: v_writelane_b32 v40, s39, 7 +; SI-NEXT: v_writelane_b32 v40, s48, 8 +; SI-NEXT: v_writelane_b32 v40, s49, 9 +; SI-NEXT: v_writelane_b32 v40, s50, 10 +; SI-NEXT: v_writelane_b32 v40, s51, 11 +; SI-NEXT: v_writelane_b32 v40, s52, 12 +; SI-NEXT: v_writelane_b32 v40, s53, 13 +; SI-NEXT: v_writelane_b32 v40, s54, 14 +; SI-NEXT: v_writelane_b32 v40, s55, 15 +; SI-NEXT: v_writelane_b32 v40, s64, 16 +; SI-NEXT: v_writelane_b32 v40, s65, 17 +; SI-NEXT: v_writelane_b32 v40, s66, 18 +; SI-NEXT: v_writelane_b32 v40, s67, 19 +; SI-NEXT: v_writelane_b32 v40, s68, 20 +; SI-NEXT: v_writelane_b32 v40, s69, 21 +; SI-NEXT: v_writelane_b32 v40, s70, 22 +; SI-NEXT: v_writelane_b32 v40, s71, 23 +; SI-NEXT: v_writelane_b32 v40, s80, 24 +; SI-NEXT: v_writelane_b32 v40, s81, 25 +; SI-NEXT: v_writelane_b32 v40, s82, 26 +; SI-NEXT: v_writelane_b32 v40, s83, 27 +; SI-NEXT: v_writelane_b32 v40, s84, 28 +; SI-NEXT: v_writelane_b32 v40, s85, 29 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3 +; SI-NEXT: v_writelane_b32 v40, s86, 30 ; SI-NEXT: v_readfirstlane_b32 s4, v1 ; 
SI-NEXT: s_and_b64 s[6:7], vcc, exec ; SI-NEXT: v_readfirstlane_b32 s5, v2 +; SI-NEXT: v_writelane_b32 v40, s87, 31 ; SI-NEXT: s_cbranch_scc0 .LBB85_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s4 -; SI-NEXT: v_alignbit_b32 v2, s5, v1, 24 -; SI-NEXT: v_alignbit_b32 v17, s5, v1, 16 -; SI-NEXT: v_alignbit_b32 v18, s5, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s28 -; SI-NEXT: v_alignbit_b32 v20, s29, v1, 24 -; SI-NEXT: v_alignbit_b32 v4, s29, v1, 16 -; SI-NEXT: v_alignbit_b32 v19, s29, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s26 -; SI-NEXT: v_alignbit_b32 v6, s27, v1, 24 -; SI-NEXT: v_alignbit_b32 v21, s27, v1, 16 -; SI-NEXT: v_alignbit_b32 v22, s27, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s24 -; SI-NEXT: v_alignbit_b32 v8, s25, v1, 24 -; SI-NEXT: v_alignbit_b32 v23, s25, v1, 16 -; SI-NEXT: v_alignbit_b32 v24, s25, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s22 -; SI-NEXT: v_alignbit_b32 v10, s23, v1, 24 -; SI-NEXT: v_alignbit_b32 v25, s23, v1, 16 -; SI-NEXT: v_alignbit_b32 v26, s23, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s20 -; SI-NEXT: v_alignbit_b32 v12, s21, v1, 24 -; SI-NEXT: v_alignbit_b32 v14, s21, v1, 16 -; SI-NEXT: v_alignbit_b32 v16, s21, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s18 -; SI-NEXT: v_alignbit_b32 v27, s19, v1, 24 -; SI-NEXT: v_alignbit_b32 v28, s19, v1, 16 -; SI-NEXT: v_alignbit_b32 v29, s19, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s16 -; SI-NEXT: v_alignbit_b32 v30, s17, v1, 24 -; SI-NEXT: v_alignbit_b32 v31, s17, v1, 16 -; SI-NEXT: v_alignbit_b32 v32, s17, v1, 8 -; SI-NEXT: s_lshr_b32 s8, s5, 24 -; SI-NEXT: s_lshr_b32 s9, s5, 16 -; SI-NEXT: s_lshr_b32 s10, s5, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; 
SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 +; SI-NEXT: s_lshr_b32 s48, s5, 24 +; SI-NEXT: s_lshr_b32 s49, s5, 16 +; SI-NEXT: s_lshr_b32 s50, s5, 8 +; SI-NEXT: s_lshr_b32 s51, s29, 24 +; SI-NEXT: s_lshr_b32 s52, s29, 16 +; SI-NEXT: s_lshr_b32 s53, s29, 8 +; SI-NEXT: s_lshr_b32 s54, s27, 24 +; SI-NEXT: s_lshr_b32 s55, s27, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 8 +; SI-NEXT: s_lshr_b32 s65, s25, 24 +; SI-NEXT: s_lshr_b32 s66, s25, 16 +; SI-NEXT: s_lshr_b32 s67, s25, 8 +; SI-NEXT: s_lshr_b32 s68, s23, 24 +; SI-NEXT: s_lshr_b32 s69, s23, 16 +; SI-NEXT: s_lshr_b32 s70, s23, 8 +; SI-NEXT: s_lshr_b32 s71, s21, 24 +; SI-NEXT: s_lshr_b32 s80, s21, 16 +; SI-NEXT: s_lshr_b32 s81, s21, 8 +; SI-NEXT: s_lshr_b32 s82, s19, 24 +; SI-NEXT: s_lshr_b32 s83, s19, 16 +; SI-NEXT: s_lshr_b32 s84, s19, 8 +; SI-NEXT: s_lshr_b32 s85, s17, 24 +; SI-NEXT: s_lshr_b32 s86, s17, 16 +; SI-NEXT: s_lshr_b32 s87, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[28:29], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[28:29], 8 +; SI-NEXT: s_lshr_b64 s[76:77], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[78:79], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[24:25], 24 +; SI-NEXT: s_lshr_b64 s[94:95], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[24:25], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[22:23], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[38:39], s[22:23], 8 +; SI-NEXT: s_lshr_b64 s[8:9], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[20:21], 16 +; SI-NEXT: s_lshr_b64 
s[40:41], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[58:59], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB85_4 ; SI-NEXT: .LBB85_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[15:16], s[16:17], 1.0 -; SI-NEXT: v_add_f64 v[13:14], s[18:19], 1.0 -; SI-NEXT: v_add_f64 v[11:12], s[20:21], 1.0 -; SI-NEXT: v_add_f64 v[9:10], s[22:23], 1.0 -; SI-NEXT: v_add_f64 v[7:8], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[28:29], s[18:19], 1.0 ; SI-NEXT: v_add_f64 v[5:6], s[26:27], 1.0 -; SI-NEXT: v_add_f64 v[1:2], s[4:5], 1.0 +; SI-NEXT: v_add_f64 v[13:14], s[22:23], 1.0 ; SI-NEXT: v_add_f64 v[3:4], s[28:29], 1.0 -; SI-NEXT: v_readfirstlane_b32 s5, v2 -; SI-NEXT: v_readfirstlane_b32 s29, v4 -; SI-NEXT: v_readfirstlane_b32 s27, v6 +; SI-NEXT: v_lshr_b64 v[48:49], v[28:29], 24 +; SI-NEXT: v_add_f64 v[1:2], s[4:5], 1.0 +; SI-NEXT: v_add_f64 v[7:8], s[24:25], 1.0 +; SI-NEXT: v_add_f64 v[20:21], s[20:21], 1.0 +; SI-NEXT: v_add_f64 v[32:33], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[22:23], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[28:29], 16 +; SI-NEXT: v_lshr_b64 v[15:16], v[3:4], 24 +; SI-NEXT: v_lshr_b64 v[23:24], v[5:6], 8 +; SI-NEXT: v_lshr_b64 v[35:36], v[13:14], 8 +; SI-NEXT: v_lshr_b64 v[50:51], v[28:29], 8 +; SI-NEXT: v_lshr_b64 v[9:10], v[1:2], 24 +; SI-NEXT: v_lshr_b64 v[16:17], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 24 +; SI-NEXT: v_lshr_b64 v[36:37], v[20:21], 24 +; SI-NEXT: v_lshr_b64 v[51:52], v[32:33], 24 +; SI-NEXT: v_lshr_b64 v[10:11], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[17:18], v[3:4], 8 +; SI-NEXT: v_lshr_b64 v[25:26], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[32:33], 16 +; SI-NEXT: v_readfirstlane_b32 s17, v33 +; SI-NEXT: v_readfirstlane_b32 s19, v29 +; 
SI-NEXT: v_readfirstlane_b32 s21, v21 +; SI-NEXT: v_readfirstlane_b32 s23, v14 ; SI-NEXT: v_readfirstlane_b32 s25, v8 -; SI-NEXT: v_readfirstlane_b32 s23, v10 -; SI-NEXT: v_readfirstlane_b32 s21, v12 -; SI-NEXT: v_readfirstlane_b32 s19, v14 -; SI-NEXT: v_readfirstlane_b32 s17, v16 -; SI-NEXT: v_alignbit_b32 v2, s5, v1, 24 -; SI-NEXT: v_alignbit_b32 v17, s5, v1, 16 -; SI-NEXT: v_alignbit_b32 v18, s5, v1, 8 -; SI-NEXT: v_alignbit_b32 v20, s29, v3, 24 -; SI-NEXT: v_alignbit_b32 v4, s29, v3, 16 -; SI-NEXT: v_alignbit_b32 v19, s29, v3, 8 -; SI-NEXT: v_alignbit_b32 v6, s27, v5, 24 -; SI-NEXT: v_alignbit_b32 v21, s27, v5, 16 -; SI-NEXT: v_alignbit_b32 v22, s27, v5, 8 -; SI-NEXT: v_alignbit_b32 v8, s25, v7, 24 -; SI-NEXT: v_alignbit_b32 v23, s25, v7, 16 -; SI-NEXT: v_alignbit_b32 v24, s25, v7, 8 -; SI-NEXT: v_alignbit_b32 v10, s23, v9, 24 -; SI-NEXT: v_alignbit_b32 v25, s23, v9, 16 -; SI-NEXT: v_alignbit_b32 v26, s23, v9, 8 -; SI-NEXT: v_alignbit_b32 v12, s21, v11, 24 -; SI-NEXT: s_lshr_b32 s8, s5, 24 -; SI-NEXT: s_lshr_b32 s9, s5, 16 -; SI-NEXT: s_lshr_b32 s10, s5, 8 -; SI-NEXT: s_lshr_b32 s11, s29, 24 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s29, 8 -; SI-NEXT: s_lshr_b32 s14, s27, 24 -; SI-NEXT: s_lshr_b32 s15, s27, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 8 -; SI-NEXT: s_lshr_b32 s41, s25, 24 -; SI-NEXT: s_lshr_b32 s42, s25, 16 -; SI-NEXT: s_lshr_b32 s43, s25, 8 -; SI-NEXT: s_lshr_b32 s44, s23, 24 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s23, 8 -; SI-NEXT: s_lshr_b32 s47, s21, 24 -; SI-NEXT: s_lshr_b32 s56, s21, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 8 -; SI-NEXT: s_lshr_b32 s58, s19, 24 -; SI-NEXT: s_lshr_b32 s59, s19, 16 -; SI-NEXT: s_lshr_b32 s60, s19, 8 -; SI-NEXT: s_lshr_b32 s61, s17, 24 -; SI-NEXT: s_lshr_b32 s62, s17, 16 -; SI-NEXT: s_lshr_b32 s63, s17, 8 -; SI-NEXT: v_alignbit_b32 v14, s21, v11, 16 -; SI-NEXT: v_alignbit_b32 v16, s21, v11, 8 -; SI-NEXT: v_alignbit_b32 v27, s19, v13, 24 -; SI-NEXT: v_alignbit_b32 v28, s19, 
v13, 16 -; SI-NEXT: v_alignbit_b32 v29, s19, v13, 8 -; SI-NEXT: v_alignbit_b32 v30, s17, v15, 24 -; SI-NEXT: v_alignbit_b32 v31, s17, v15, 16 -; SI-NEXT: v_alignbit_b32 v32, s17, v15, 8 +; SI-NEXT: v_readfirstlane_b32 s27, v6 +; SI-NEXT: v_readfirstlane_b32 s29, v4 +; SI-NEXT: v_readfirstlane_b32 s5, v2 +; SI-NEXT: v_lshr_b64 v[11:12], v[1:2], 8 +; SI-NEXT: v_lshr_b64 v[18:19], v[5:6], 24 +; SI-NEXT: v_lshr_b64 v[26:27], v[7:8], 8 +; SI-NEXT: v_lshr_b64 v[30:31], v[13:14], 24 +; SI-NEXT: v_lshr_b64 v[38:39], v[20:21], 8 +; SI-NEXT: v_lshr_b64 v[53:54], v[32:33], 8 +; SI-NEXT: s_lshr_b32 s48, s5, 24 +; SI-NEXT: s_lshr_b32 s49, s5, 16 +; SI-NEXT: s_lshr_b32 s50, s5, 8 +; SI-NEXT: s_lshr_b32 s51, s29, 24 +; SI-NEXT: s_lshr_b32 s52, s29, 16 +; SI-NEXT: s_lshr_b32 s53, s29, 8 +; SI-NEXT: s_lshr_b32 s54, s27, 24 +; SI-NEXT: s_lshr_b32 s55, s27, 16 +; SI-NEXT: s_lshr_b32 s64, s27, 8 +; SI-NEXT: s_lshr_b32 s65, s25, 24 +; SI-NEXT: s_lshr_b32 s66, s25, 16 +; SI-NEXT: s_lshr_b32 s67, s25, 8 +; SI-NEXT: s_lshr_b32 s68, s23, 24 +; SI-NEXT: s_lshr_b32 s69, s23, 16 +; SI-NEXT: s_lshr_b32 s70, s23, 8 +; SI-NEXT: s_lshr_b32 s71, s21, 24 +; SI-NEXT: s_lshr_b32 s80, s21, 16 +; SI-NEXT: s_lshr_b32 s81, s21, 8 +; SI-NEXT: s_lshr_b32 s82, s19, 24 +; SI-NEXT: s_lshr_b32 s83, s19, 16 +; SI-NEXT: s_lshr_b32 s84, s19, 8 +; SI-NEXT: s_lshr_b32 s85, s17, 24 +; SI-NEXT: s_lshr_b32 s86, s17, 16 +; SI-NEXT: s_lshr_b32 s87, s17, 8 ; SI-NEXT: s_branch .LBB85_5 ; SI-NEXT: .LBB85_3: -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $sgpr63 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $sgpr59 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; 
implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr57 +; SI-NEXT: ; implicit-def: $sgpr87 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr85 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $sgpr45 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr83 +; SI-NEXT: ; implicit-def: $sgpr82 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $sgpr9 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr67 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr65 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr51 +; 
SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr6 ; SI-NEXT: s_branch .LBB85_2 ; SI-NEXT: .LBB85_4: -; SI-NEXT: v_mov_b32_e32 v1, s4 -; SI-NEXT: v_mov_b32_e32 v3, s28 -; SI-NEXT: v_mov_b32_e32 v5, s26 +; SI-NEXT: v_mov_b32_e32 v32, s16 +; SI-NEXT: v_mov_b32_e32 v28, s18 +; SI-NEXT: v_mov_b32_e32 v20, s20 +; SI-NEXT: v_mov_b32_e32 v13, s22 ; SI-NEXT: v_mov_b32_e32 v7, s24 -; SI-NEXT: v_mov_b32_e32 v9, s22 -; SI-NEXT: v_mov_b32_e32 v11, s20 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 +; SI-NEXT: v_mov_b32_e32 v5, s26 +; SI-NEXT: v_mov_b32_e32 v3, s28 +; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_mov_b32_e32 v53, s74 +; SI-NEXT: v_mov_b32_e32 v52, s62 +; SI-NEXT: v_mov_b32_e32 v51, s58 +; SI-NEXT: v_mov_b32_e32 v50, s56 +; SI-NEXT: v_mov_b32_e32 v49, s44 +; SI-NEXT: v_mov_b32_e32 v48, s42 +; SI-NEXT: v_mov_b32_e32 v38, s40 +; SI-NEXT: v_mov_b32_e32 v37, s12 +; SI-NEXT: v_mov_b32_e32 v36, s8 +; SI-NEXT: v_mov_b32_e32 v35, s38 +; SI-NEXT: v_mov_b32_e32 v34, s36 +; SI-NEXT: v_mov_b32_e32 v30, s34 +; SI-NEXT: v_mov_b32_e32 v26, s30 +; SI-NEXT: v_mov_b32_e32 v25, s94 +; SI-NEXT: v_mov_b32_e32 v24, s92 +; SI-NEXT: v_mov_b32_e32 v23, s88 +; SI-NEXT: v_mov_b32_e32 v22, s78 +; SI-NEXT: v_mov_b32_e32 v18, s76 +; SI-NEXT: v_mov_b32_e32 v17, s72 +; SI-NEXT: v_mov_b32_e32 v16, s60 +; SI-NEXT: v_mov_b32_e32 v15, s46 +; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: 
v_mov_b32_e32 v10, s10 +; SI-NEXT: v_mov_b32_e32 v9, s6 ; SI-NEXT: .LBB85_5: ; %end +; SI-NEXT: v_and_b32_e32 v2, 0xff, v32 +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v53 ; SI-NEXT: s_and_b32 s4, s17, 0xff -; SI-NEXT: s_lshl_b32 s6, s63, 8 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v32 -; SI-NEXT: v_and_b32_e32 v31, 0xff, v31 +; SI-NEXT: s_lshl_b32 s6, s87, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v52 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s62, 0xff -; SI-NEXT: v_or_b32_e32 v15, v15, v32 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v30 +; SI-NEXT: s_and_b32 s6, s86, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v51 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s61, 24 -; SI-NEXT: v_or_b32_e32 v30, v30, v31 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: s_lshl_b32 s7, s85, 24 +; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v15, v15, v30 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v15, v0, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v30, s4 -; SI-NEXT: buffer_store_dword v30, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v29 +; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v28 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v50 ; SI-NEXT: s_and_b32 s4, s19, 0xff -; SI-NEXT: s_lshl_b32 s6, s60, 8 -; SI-NEXT: v_or_b32_e32 v13, v13, v15 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v28 +; SI-NEXT: s_lshl_b32 s6, s84, 8 +; 
SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v49 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s59, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 +; SI-NEXT: s_and_b32 s6, s83, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v48 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s58, 24 -; SI-NEXT: v_or_b32_e32 v15, v27, v15 -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: s_lshl_b32 s7, s82, 24 +; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v13, v13, v15 -; SI-NEXT: v_add_i32_e32 v15, vcc, 8, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 8, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: buffer_store_dword v15, v13, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v16 +; SI-NEXT: v_add_i32_e32 v2, vcc, 12, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v20 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v38 ; SI-NEXT: s_and_b32 s4, s21, 0xff -; SI-NEXT: s_lshl_b32 s6, s57, 8 -; SI-NEXT: v_or_b32_e32 v11, v11, v13 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v14 +; SI-NEXT: s_lshl_b32 s6, s81, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v37 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s56, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v12 +; SI-NEXT: s_and_b32 s6, s80, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v36 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; 
SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s47, 24 -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: s_lshl_b32 s7, s71, 24 +; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v11, v11, v12 -; SI-NEXT: v_add_i32_e32 v12, vcc, 16, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 16, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v26 +; SI-NEXT: v_add_i32_e32 v2, vcc, 20, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v13 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v35 ; SI-NEXT: s_and_b32 s4, s23, 0xff -; SI-NEXT: s_lshl_b32 s6, s46, 8 -; SI-NEXT: v_or_b32_e32 v9, v9, v11 -; SI-NEXT: v_and_b32_e32 v11, 0xff, v25 +; SI-NEXT: s_lshl_b32 s6, s70, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v34 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s45, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; SI-NEXT: s_and_b32 s6, s69, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v30 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s44, 24 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 +; SI-NEXT: s_lshl_b32 s7, s68, 24 +; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 
v9, v9, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 24, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 24, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v24 +; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v7 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v26 ; SI-NEXT: s_and_b32 s4, s25, 0xff -; SI-NEXT: s_lshl_b32 s6, s43, 8 -; SI-NEXT: v_or_b32_e32 v7, v7, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v23 +; SI-NEXT: s_lshl_b32 s6, s67, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v25 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s42, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; SI-NEXT: s_and_b32 s6, s66, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v24 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s41, 24 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 +; SI-NEXT: s_lshl_b32 s7, s65, 24 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v4, v6, v4 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 32, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 32, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 36, v0 -; SI-NEXT: 
v_mov_b32_e32 v8, s4 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v22 +; SI-NEXT: v_add_i32_e32 v2, vcc, 36, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v5 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v23 ; SI-NEXT: s_and_b32 s4, s27, 0xff -; SI-NEXT: s_lshl_b32 s6, s40, 8 -; SI-NEXT: v_or_b32_e32 v5, v5, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v21 +; SI-NEXT: s_lshl_b32 s6, s64, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v22 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s15, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; SI-NEXT: s_and_b32 s6, s55, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v18 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s14, 24 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 +; SI-NEXT: s_lshl_b32 s7, s54, 24 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v4, v5, v4 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 40, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_add_i32_e32 v4, vcc, 40, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 44, v0 -; SI-NEXT: v_mov_b32_e32 v6, s4 +; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v0 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v2, 0xff, v3 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v17 ; SI-NEXT: s_and_b32 s4, s29, 0xff -; SI-NEXT: s_lshl_b32 s6, s13, 8 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: 
v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v19 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_lshl_b32 s6, s53, 8 +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v16 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: s_and_b32 s6, s12, 0xff -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v20 +; SI-NEXT: s_and_b32 s6, s52, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v15 ; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_lshl_b32 s7, s11, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: s_lshl_b32 s7, s51, 24 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v3, v4, v3 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s6, s7, s6 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 48, v0 +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 ; SI-NEXT: s_or_b32 s4, s4, s6 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v0 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v18 +; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v11 ; SI-NEXT: s_and_b32 s4, s5, 0xff -; SI-NEXT: s_lshl_b32 s5, s10, 8 -; SI-NEXT: v_or_b32_e32 v1, v1, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v17 +; SI-NEXT: s_lshl_b32 s5, s50, 8 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v10 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s9, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; SI-NEXT: 
s_and_b32 s5, s49, 0xff +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v9 ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s8, 24 +; SI-NEXT: s_lshl_b32 s6, s48, 24 ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_or_b32_e32 v2, v3, v2 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 @@ -51571,6 +51866,41 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s87, v40, 31 +; SI-NEXT: v_readlane_b32 s86, v40, 30 +; SI-NEXT: v_readlane_b32 s85, v40, 29 +; SI-NEXT: v_readlane_b32 s84, v40, 28 +; SI-NEXT: v_readlane_b32 s83, v40, 27 +; SI-NEXT: v_readlane_b32 s82, v40, 26 +; SI-NEXT: v_readlane_b32 s81, v40, 25 +; SI-NEXT: v_readlane_b32 s80, v40, 24 +; SI-NEXT: v_readlane_b32 s71, v40, 23 +; SI-NEXT: v_readlane_b32 s70, v40, 22 +; SI-NEXT: v_readlane_b32 s69, v40, 21 +; SI-NEXT: v_readlane_b32 s68, v40, 20 +; SI-NEXT: v_readlane_b32 s67, v40, 19 +; SI-NEXT: v_readlane_b32 s66, v40, 18 +; SI-NEXT: v_readlane_b32 s65, v40, 17 +; SI-NEXT: v_readlane_b32 s64, v40, 16 +; SI-NEXT: v_readlane_b32 s55, v40, 15 +; SI-NEXT: v_readlane_b32 s54, v40, 14 +; SI-NEXT: v_readlane_b32 s53, v40, 13 +; SI-NEXT: v_readlane_b32 s52, v40, 12 +; SI-NEXT: v_readlane_b32 s51, v40, 11 +; SI-NEXT: v_readlane_b32 s50, v40, 10 +; SI-NEXT: v_readlane_b32 s49, v40, 9 +; SI-NEXT: v_readlane_b32 s48, v40, 8 +; SI-NEXT: v_readlane_b32 s39, v40, 7 +; SI-NEXT: v_readlane_b32 s38, v40, 6 +; SI-NEXT: v_readlane_b32 s37, v40, 5 +; SI-NEXT: v_readlane_b32 s36, v40, 4 +; SI-NEXT: v_readlane_b32 s35, v40, 3 +; SI-NEXT: v_readlane_b32 s34, v40, 2 +; SI-NEXT: v_readlane_b32 s31, v40, 1 +; SI-NEXT: v_readlane_b32 s30, v40, 0 +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: 
buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; @@ -58201,95 +58531,120 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i ; SI-LABEL: bitcast_v32f16_to_v32i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_mov_b32_e32 v31, v17 -; SI-NEXT: v_mov_b32_e32 v30, v16 -; SI-NEXT: v_mov_b32_e32 v29, v15 -; SI-NEXT: v_mov_b32_e32 v28, v14 -; SI-NEXT: v_mov_b32_e32 v15, v1 -; SI-NEXT: v_mov_b32_e32 v14, v0 +; SI-NEXT: v_mov_b32_e32 v21, v16 +; SI-NEXT: v_mov_b32_e32 v25, v15 +; SI-NEXT: v_mov_b32_e32 v26, v12 +; SI-NEXT: v_mov_b32_e32 v29, v11 +; SI-NEXT: v_mov_b32_e32 v22, v8 +; SI-NEXT: v_mov_b32_e32 v30, v7 +; SI-NEXT: v_mov_b32_e32 v32, v4 +; SI-NEXT: v_mov_b32_e32 v33, v3 +; SI-NEXT: v_mov_b32_e32 v34, v0 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18 ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v23, v9 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v27, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 -; SI-NEXT: 
v_cvt_f16_f32_e32 v5, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s23 ; SI-NEXT: v_cvt_f16_f32_e32 v8, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 ; SI-NEXT: v_cvt_f16_f32_e32 v11, s27 ; SI-NEXT: v_cvt_f16_f32_e32 v12, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v37, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v35, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v6, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v33, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB91_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB91_3 ; SI-NEXT: .LBB91_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 ; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 +; SI-NEXT: 
v_cvt_f32_f16_e32 v3, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 ; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 ; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: 
v_cvt_f16_f32_e32 v23, v23 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v15 +; SI-NEXT: v_or_b32_e32 v14, v14, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v48 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v37 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v11 +; SI-NEXT: v_or_b32_e32 v18, v18, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v49 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 +; SI-NEXT: v_or_b32_e32 v2, v2, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v35 +; SI-NEXT: v_or_b32_e32 v6, v6, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v33 +; SI-NEXT: v_or_b32_e32 v10, v10, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v38 +; SI-NEXT: v_or_b32_e32 v22, v22, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v52 +; SI-NEXT: v_or_b32_e32 v26, v26, v29 +; SI-NEXT: v_cvt_f32_f16_e32 v29, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 +; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 @@ -58298,52 +58653,34 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 ; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_or_b32_e32 v30, v30, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_or_b32_e32 v26, v26, v32 -; SI-NEXT: v_lshlrev_b32_e32 
v32, 16, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_or_b32_e32 v22, v22, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 ; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 ; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_or_b32_e32 v18, v18, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; 
SI-NEXT: v_cvt_f16_f32_e32 v29, v29 ; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_or_b32_e32 v14, v14, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v11 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 @@ -58352,19 +58689,23 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_or_b32_e32 v10, v10, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v32 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v32 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v31 +; SI-NEXT: v_lshr_b64 v[48:49], v[17:18], 16 ; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 ; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; SI-NEXT: v_or_b32_e32 v30, v30, v32 ; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; SI-NEXT: v_lshr_b64 v[37:38], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[29:30], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 @@ -58373,15 +58714,15 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i ; SI-NEXT: v_or_b32_e32 v20, v20, v21 ; SI-NEXT: v_or_b32_e32 v24, v24, v25 ; SI-NEXT: v_or_b32_e32 v28, v28, v29 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, 
v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16 -; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16 ; SI-NEXT: .LBB91_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v37 +; SI-NEXT: v_mov_b32_e32 v5, v35 +; SI-NEXT: v_mov_b32_e32 v9, v33 +; SI-NEXT: v_mov_b32_e32 v13, v38 +; SI-NEXT: v_mov_b32_e32 v17, v48 +; SI-NEXT: v_mov_b32_e32 v21, v49 +; SI-NEXT: v_mov_b32_e32 v25, v52 +; SI-NEXT: v_mov_b32_e32 v29, v50 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB91_4: ; SI-NEXT: s_branch .LBB91_2 @@ -60786,185 +61127,185 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a ; SI-NEXT: s_waitcnt expcnt(6) ; SI-NEXT: v_mul_f32_e64 v57, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v56, 1.0, s17 -; SI-NEXT: v_mul_f32_e32 v35, 1.0, v0 -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v0 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v1 ; SI-NEXT: v_mul_f32_e32 v47, 1.0, v2 ; SI-NEXT: v_mul_f32_e32 v46, 1.0, v3 -; SI-NEXT: v_mul_f32_e32 v39, 1.0, v4 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v5 +; SI-NEXT: v_mul_f32_e32 v53, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v52, 1.0, v5 ; SI-NEXT: v_mul_f32_e32 v45, 1.0, v6 ; SI-NEXT: v_mul_f32_e32 v44, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v8 -; SI-NEXT: v_mul_f32_e32 v50, 1.0, v9 +; SI-NEXT: v_mul_f32_e32 v55, 1.0, v8 +; SI-NEXT: v_mul_f32_e32 v21, 1.0, v9 ; SI-NEXT: v_mul_f32_e32 v43, 1.0, v10 ; SI-NEXT: v_mul_f32_e32 v42, 1.0, v11 -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v12 -; SI-NEXT: v_mul_f32_e32 v52, 1.0, v13 +; SI-NEXT: v_mul_f32_e32 v25, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13 ; SI-NEXT: v_mul_f32_e32 v41, 1.0, v14 ; SI-NEXT: v_mul_f32_e32 v40, 1.0, v15 -; SI-NEXT: v_mul_f32_e32 v55, 1.0, v16 -; SI-NEXT: v_mul_f32_e32 v54, 1.0, v17 -; SI-NEXT: v_mul_f32_e64 v33, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v32, 1.0, s19 +; SI-NEXT: v_mul_f32_e32 v29, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 
v17, 1.0, v17 +; SI-NEXT: v_mul_f32_e64 v32, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mul_f32_e64 v63, 1.0, s20 ; SI-NEXT: v_mul_f32_e64 v62, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v37, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v36, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v51, 1.0, s22 +; SI-NEXT: v_mul_f32_e64 v5, 1.0, s23 ; SI-NEXT: v_mul_f32_e64 v61, 1.0, s24 ; SI-NEXT: v_mul_f32_e64 v60, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v49, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v48, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v54, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v9, 1.0, s27 ; SI-NEXT: v_mul_f32_e64 v59, 1.0, s28 ; SI-NEXT: v_mul_f32_e64 v58, 1.0, s29 ; SI-NEXT: s_cbranch_scc0 .LBB95_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v57 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v56 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v33 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v56 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v63 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v62 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v37 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v36 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v62 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v51 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v5 ; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v61 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v60 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v49 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v48 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v60 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v54 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9 ; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v59 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v58 -; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v35 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v34 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v39 ; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v47 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v46 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v39 -; SI-NEXT: v_lshrrev_b32_e32 
v19, 16, v38 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v52 ; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v45 -; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v44 -; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v51 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v50 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v21 ; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v43 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v42 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v53 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v52 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v41 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v40 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v55 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v54 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v58 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v49 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v46 +; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v53 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v44 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v55 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v42 +; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v40 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v29 ; SI-NEXT: s_cbranch_execnz .LBB95_3 ; SI-NEXT: .LBB95_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v56 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v56 ; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v57 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v28 ; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v62 ; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v63 ; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v60 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v60 ; SI-NEXT: v_alignbit_b32 v4, v4, v2, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v61 -; 
SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v5 -; SI-NEXT: v_alignbit_b32 v8, v6, v2, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v58 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v59 ; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v6 -; SI-NEXT: v_alignbit_b32 v12, v7, v2, 16 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v46 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v47 +; SI-NEXT: v_alignbit_b32 v8, v7, v2, 16 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v58 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v59 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v7 -; SI-NEXT: v_alignbit_b32 v16, v9, v2, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v44 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v7 +; SI-NEXT: v_alignbit_b32 v12, v10, v2, 16 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v46 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v47 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 +; SI-NEXT: v_alignbit_b32 v16, v11, v2, 16 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v44 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v45 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 +; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v9 -; SI-NEXT: v_alignbit_b32 v20, v10, v2, 16 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v42 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v11 +; SI-NEXT: v_alignbit_b32 v20, v14, v2, 16 +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v42 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v43 -; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_alignbit_b32 v24, v11, v2, 16 
-; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v40 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14 +; SI-NEXT: v_alignbit_b32 v24, v15, v2, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v41 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 +; SI-NEXT: v_add_f32_e32 v41, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v40 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v11 -; SI-NEXT: v_alignbit_b32 v28, v13, v2, 16 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v54 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v2 +; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v29 +; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v25 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v13 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v21 +; SI-NEXT: v_alignbit_b32 v26, v27, v2, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v55 ; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v13 -; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v11 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v13 +; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v11 ; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v52 -; SI-NEXT: v_alignbit_b32 v30, v31, v2, 16 +; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; SI-NEXT: v_alignbit_b32 v22, v23, v2, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v53 ; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v17 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v11 -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v10 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v50 -; SI-NEXT: v_alignbit_b32 v26, v27, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v51 -; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; 
SI-NEXT: v_lshrrev_b32_e32 v23, 16, v10 -; SI-NEXT: v_and_b32_e32 v21, 0xffff0000, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v38 -; SI-NEXT: v_alignbit_b32 v22, v23, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v39 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v9 -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v7 -; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v34 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v11 +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v39 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 ; SI-NEXT: v_alignbit_b32 v18, v19, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v35 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v49 +; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 +; SI-NEXT: v_alignbit_b32 v30, v31, v15, 16 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v7 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v48 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v10 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v7 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v9 +; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v14 ; SI-NEXT: v_alignbit_b32 v14, v15, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v49 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v54 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v6 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v36 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v7 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; SI-NEXT: v_alignbit_b32 v10, v11, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v37 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v51 ; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: 
v_lshrrev_b32_e32 v7, 16, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 ; SI-NEXT: v_alignbit_b32 v6, v7, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v33 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v32 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16 -; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v28 +; SI-NEXT: v_lshr_b64 v[33:34], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[29:30], 16 +; SI-NEXT: v_alignbit_b32 v28, v40, v41, 16 ; SI-NEXT: .LBB95_3: ; %end ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload @@ -60982,41 +61323,49 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, 
s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v1, v33 +; SI-NEXT: v_mov_b32_e32 v5, v34 +; SI-NEXT: v_mov_b32_e32 v9, v35 +; SI-NEXT: v_mov_b32_e32 v13, v36 +; SI-NEXT: v_mov_b32_e32 v17, v37 +; SI-NEXT: v_mov_b32_e32 v21, v38 +; SI-NEXT: v_mov_b32_e32 v25, v50 +; SI-NEXT: v_mov_b32_e32 v29, v48 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB95_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr10 ; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr14 ; SI-NEXT: ; implicit-def: $vgpr15 ; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 ; SI-NEXT: ; implicit-def: $vgpr19 ; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr22 ; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr30 ; 
SI-NEXT: s_branch .LBB95_2 ; ; VI-LABEL: bitcast_v32bf16_to_v32i16_scalar: @@ -64240,534 +64589,686 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill ; SI-NEXT: s_mov_b64 exec, s[4:5] -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_writelane_b32 v37, s30, 0 -; SI-NEXT: v_writelane_b32 v37, s31, 1 -; SI-NEXT: v_writelane_b32 v37, s34, 2 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_writelane_b32 v20, s30, 0 +; SI-NEXT: v_writelane_b32 v20, s31, 1 +; SI-NEXT: v_writelane_b32 v20, s34, 2 +; SI-NEXT: v_writelane_b32 v20, s35, 3 +; SI-NEXT: v_writelane_b32 v20, s36, 4 +; SI-NEXT: v_writelane_b32 v20, s37, 5 +; SI-NEXT: v_writelane_b32 v20, s38, 6 +; SI-NEXT: v_writelane_b32 v20, s39, 7 +; SI-NEXT: v_writelane_b32 v20, s48, 8 +; SI-NEXT: v_writelane_b32 v20, s49, 9 +; SI-NEXT: v_writelane_b32 v20, s50, 10 +; SI-NEXT: v_writelane_b32 v20, s51, 11 +; SI-NEXT: v_writelane_b32 v20, s52, 12 +; SI-NEXT: v_writelane_b32 v20, s53, 13 +; SI-NEXT: v_writelane_b32 v20, s54, 14 +; SI-NEXT: v_writelane_b32 v20, s55, 15 +; SI-NEXT: v_writelane_b32 v20, s64, 16 +; SI-NEXT: v_writelane_b32 v20, s65, 17 +; SI-NEXT: v_writelane_b32 v20, s66, 18 +; SI-NEXT: v_writelane_b32 v20, s67, 19 +; SI-NEXT: v_writelane_b32 v20, s68, 20 +; SI-NEXT: v_writelane_b32 v20, s69, 21 +; SI-NEXT: v_writelane_b32 v20, s70, 22 +; SI-NEXT: v_writelane_b32 v20, s71, 23 +; SI-NEXT: v_writelane_b32 v20, s80, 24 +; SI-NEXT: v_writelane_b32 v20, s81, 25 +; SI-NEXT: v_writelane_b32 v20, s82, 26 +; SI-NEXT: v_writelane_b32 v20, s83, 27 +; SI-NEXT: v_writelane_b32 v20, s84, 28 +; SI-NEXT: v_writelane_b32 v20, s85, 29 +; SI-NEXT: v_writelane_b32 v20, s86, 
30 +; SI-NEXT: v_writelane_b32 v20, s87, 31 +; SI-NEXT: v_writelane_b32 v20, s96, 32 +; SI-NEXT: v_writelane_b32 v20, s97, 33 +; SI-NEXT: v_writelane_b32 v20, s98, 34 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 -; SI-NEXT: v_writelane_b32 v37, s35, 3 -; SI-NEXT: v_readfirstlane_b32 s34, v18 -; SI-NEXT: v_readfirstlane_b32 s35, v17 -; SI-NEXT: v_readfirstlane_b32 s30, v14 -; SI-NEXT: v_readfirstlane_b32 s31, v13 -; SI-NEXT: v_readfirstlane_b32 s94, v10 -; SI-NEXT: v_readfirstlane_b32 s95, v9 -; SI-NEXT: v_readfirstlane_b32 s92, v6 -; SI-NEXT: v_readfirstlane_b32 s93, v5 -; SI-NEXT: v_readfirstlane_b32 s90, v2 -; SI-NEXT: v_readfirstlane_b32 s91, v1 +; SI-NEXT: v_writelane_b32 v20, s99, 35 +; SI-NEXT: s_mov_b32 s93, s18 +; SI-NEXT: s_mov_b32 s31, s17 +; SI-NEXT: v_readfirstlane_b32 s59, v18 +; SI-NEXT: v_readfirstlane_b32 s18, v17 +; SI-NEXT: v_readfirstlane_b32 s63, v16 +; SI-NEXT: v_readfirstlane_b32 s17, v15 +; SI-NEXT: v_readfirstlane_b32 s72, v14 +; SI-NEXT: v_readfirstlane_b32 s76, v13 +; SI-NEXT: v_readfirstlane_b32 s57, v12 +; SI-NEXT: v_readfirstlane_b32 s61, v11 +; SI-NEXT: v_readfirstlane_b32 s44, v10 +; SI-NEXT: v_readfirstlane_b32 s58, v9 +; SI-NEXT: v_readfirstlane_b32 s62, v8 +; SI-NEXT: v_readfirstlane_b32 s45, v7 +; SI-NEXT: v_readfirstlane_b32 s96, v6 +; SI-NEXT: v_readfirstlane_b32 s97, v5 +; SI-NEXT: v_readfirstlane_b32 s99, v4 +; SI-NEXT: v_readfirstlane_b32 s46, v3 +; SI-NEXT: v_readfirstlane_b32 s83, v2 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v4 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v8 -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v16 +; SI-NEXT: v_readfirstlane_b32 s85, v1 +; SI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane ; SI-NEXT: s_cbranch_scc0 .LBB97_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s17, 16 +; SI-NEXT: s_lshl_b32 s5, s31, 16 ; SI-NEXT: s_or_b32 s40, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff 
+; SI-NEXT: s_and_b32 s4, s93, 0xffff ; SI-NEXT: s_lshl_b32 s5, s19, 16 ; SI-NEXT: s_or_b32 s41, s4, s5 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 24 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v21, s4, 4 +; SI-NEXT: v_writelane_b32 v21, s5, 5 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v21, s4, 2 +; SI-NEXT: v_writelane_b32 v21, s5, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[40:41], 8 +; SI-NEXT: v_writelane_b32 v21, s4, 0 +; SI-NEXT: v_writelane_b32 v21, s5, 1 ; SI-NEXT: s_and_b32 s4, s20, 0xffff ; SI-NEXT: s_lshl_b32 s5, s21, 16 ; SI-NEXT: s_or_b32 s14, s4, s5 ; SI-NEXT: s_and_b32 s4, s22, 0xffff ; SI-NEXT: s_lshl_b32 s5, s23, 16 ; SI-NEXT: s_or_b32 s15, s4, s5 +; SI-NEXT: s_lshr_b64 s[4:5], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v21, s4, 10 +; SI-NEXT: v_writelane_b32 v21, s5, 11 +; SI-NEXT: s_lshr_b64 s[4:5], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v21, s4, 8 +; SI-NEXT: v_writelane_b32 v21, s5, 9 +; SI-NEXT: s_lshr_b64 s[4:5], s[14:15], 8 +; SI-NEXT: v_writelane_b32 v21, s4, 6 +; SI-NEXT: v_writelane_b32 v21, s5, 7 ; SI-NEXT: s_and_b32 s4, s24, 0xffff ; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: v_mov_b32_e32 v1, s40 -; SI-NEXT: s_or_b32 s12, s4, s5 +; SI-NEXT: s_or_b32 s10, s4, s5 ; SI-NEXT: s_and_b32 s4, s26, 0xffff ; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: v_alignbit_b32 v18, s41, v1, 24 -; SI-NEXT: v_alignbit_b32 v25, s41, v1, 16 -; SI-NEXT: v_alignbit_b32 v30, s41, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s14 -; SI-NEXT: s_or_b32 s13, s4, s5 +; SI-NEXT: s_or_b32 s11, s4, s5 +; SI-NEXT: s_lshr_b64 s[4:5], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v21, s4, 16 +; SI-NEXT: v_writelane_b32 v21, s5, 17 +; SI-NEXT: s_lshr_b64 s[4:5], s[10:11], 16 +; SI-NEXT: v_writelane_b32 v21, s4, 14 +; SI-NEXT: v_writelane_b32 v21, s5, 15 +; SI-NEXT: s_lshr_b64 s[4:5], s[10:11], 8 +; SI-NEXT: v_writelane_b32 v21, s4, 12 +; SI-NEXT: v_writelane_b32 v21, s5, 13 ; SI-NEXT: s_and_b32 s4, s28, 0xffff ; SI-NEXT: s_lshl_b32 s5, s29, 16 -; 
SI-NEXT: v_alignbit_b32 v19, s15, v1, 24 -; SI-NEXT: v_alignbit_b32 v26, s15, v1, 16 -; SI-NEXT: v_alignbit_b32 v31, s15, v1, 8 -; SI-NEXT: v_mov_b32_e32 v1, s12 -; SI-NEXT: s_or_b32 s10, s4, s5 -; SI-NEXT: s_and_b32 s4, s91, 0xffff -; SI-NEXT: s_lshl_b32 s5, s90, 16 -; SI-NEXT: v_alignbit_b32 v17, s13, v1, 24 -; SI-NEXT: v_alignbit_b32 v23, s13, v1, 16 -; SI-NEXT: v_alignbit_b32 v29, s13, v1, 8 -; SI-NEXT: s_or_b32 s11, s4, s5 -; SI-NEXT: v_mov_b32_e32 v1, s10 -; SI-NEXT: v_alignbit_b32 v16, s11, v1, 24 -; SI-NEXT: v_alignbit_b32 v20, s11, v1, 16 -; SI-NEXT: v_alignbit_b32 v27, s11, v1, 8 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: s_and_b32 s4, s93, 0xffff -; SI-NEXT: s_lshl_b32 s5, s92, 16 -; SI-NEXT: v_or_b32_e32 v5, v1, v33 -; SI-NEXT: s_or_b32 s9, s4, s5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 -; SI-NEXT: s_and_b32 s4, s95, 0xffff -; SI-NEXT: s_lshl_b32 s5, s94, 16 -; SI-NEXT: v_or_b32_e32 v4, v1, v34 +; SI-NEXT: s_or_b32 s42, s4, s5 +; SI-NEXT: s_and_b32 s4, s85, 0xffff +; SI-NEXT: s_lshl_b32 s5, s83, 16 +; SI-NEXT: s_or_b32 s43, s4, s5 +; SI-NEXT: s_and_b32 s4, s46, 0xffff +; SI-NEXT: s_lshl_b32 s5, s99, 16 +; SI-NEXT: s_or_b32 s12, s4, s5 +; SI-NEXT: s_and_b32 s4, s97, 0xffff +; SI-NEXT: s_lshl_b32 s5, s96, 16 +; SI-NEXT: s_or_b32 s13, s4, s5 +; SI-NEXT: s_and_b32 s4, s45, 0xffff +; SI-NEXT: s_lshl_b32 s5, s62, 16 ; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: s_and_b32 s4, s31, 0xffff -; SI-NEXT: s_lshl_b32 s5, s30, 16 -; SI-NEXT: v_or_b32_e32 v2, v1, v35 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 -; SI-NEXT: s_and_b32 s4, s35, 0xffff -; SI-NEXT: s_lshl_b32 s5, s34, 16 -; SI-NEXT: v_or_b32_e32 v1, v1, v36 +; SI-NEXT: s_and_b32 s4, s58, 0xffff +; SI-NEXT: s_lshl_b32 s5, s44, 16 +; SI-NEXT: s_or_b32 s9, s4, s5 +; SI-NEXT: s_and_b32 s4, s61, 0xffff +; SI-NEXT: s_lshl_b32 s5, s57, 16 ; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: v_alignbit_b32 v9, s9, v5, 24 -; SI-NEXT: v_alignbit_b32 
v12, s9, v5, 16 -; SI-NEXT: v_alignbit_b32 v21, s9, v5, 8 -; SI-NEXT: v_alignbit_b32 v6, s8, v4, 24 -; SI-NEXT: v_alignbit_b32 v8, s8, v4, 16 -; SI-NEXT: v_alignbit_b32 v13, s8, v4, 8 -; SI-NEXT: v_alignbit_b32 v24, s7, v2, 24 -; SI-NEXT: v_alignbit_b32 v28, s7, v2, 16 -; SI-NEXT: v_alignbit_b32 v32, s7, v2, 8 -; SI-NEXT: v_alignbit_b32 v10, s6, v1, 24 -; SI-NEXT: v_alignbit_b32 v14, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v22, s6, v1, 8 -; SI-NEXT: s_lshr_b32 s78, s41, 8 -; SI-NEXT: s_lshr_b32 s75, s15, 8 -; SI-NEXT: s_lshr_b32 s72, s13, 8 -; SI-NEXT: s_lshr_b32 s61, s11, 8 -; SI-NEXT: s_lshr_b32 s58, s9, 8 -; SI-NEXT: s_lshr_b32 s47, s8, 8 -; SI-NEXT: s_lshr_b32 s45, s7, 8 -; SI-NEXT: s_lshr_b32 s42, s6, 8 -; SI-NEXT: s_and_b32 s88, s19, 0xffff -; SI-NEXT: s_and_b32 s77, s23, 0xffff -; SI-NEXT: s_and_b32 s74, s27, 0xffff -; SI-NEXT: s_and_b32 s63, s90, 0xffff -; SI-NEXT: s_and_b32 s60, s92, 0xffff -; SI-NEXT: s_and_b32 s57, s94, 0xffff -; SI-NEXT: s_and_b32 s46, s30, 0xffff -; SI-NEXT: s_and_b32 s43, s34, 0xffff -; SI-NEXT: s_bfe_u32 s89, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s79, s23, 0x80008 -; SI-NEXT: s_bfe_u32 s76, s27, 0x80008 -; SI-NEXT: s_bfe_u32 s73, s90, 0x80008 -; SI-NEXT: s_bfe_u32 s62, s92, 0x80008 -; SI-NEXT: s_bfe_u32 s59, s94, 0x80008 -; SI-NEXT: s_bfe_u32 s56, s30, 0x80008 -; SI-NEXT: s_bfe_u32 s44, s34, 0x80008 +; SI-NEXT: s_and_b32 s4, s76, 0xffff +; SI-NEXT: s_lshl_b32 s5, s72, 16 +; SI-NEXT: s_or_b32 s7, s4, s5 +; SI-NEXT: s_and_b32 s4, s17, 0xffff +; SI-NEXT: s_lshl_b32 s5, s63, 16 +; SI-NEXT: s_and_b32 s78, s72, 0xffff +; SI-NEXT: s_lshr_b64 s[34:35], s[8:9], 24 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s47, s59, 16 +; SI-NEXT: s_mov_b32 s35, s78 +; SI-NEXT: s_mov_b32 s78, s93 +; SI-NEXT: s_lshr_b64 s[92:93], s[6:7], 24 +; SI-NEXT: s_or_b32 s5, s5, s47 +; SI-NEXT: s_lshr_b32 s79, s7, 8 +; SI-NEXT: s_mov_b32 s93, s78 +; SI-NEXT: s_lshr_b64 s[94:95], s[6:7], 16 +; SI-NEXT: s_mov_b32 s78, s31 +; 
SI-NEXT: s_lshr_b64 s[30:31], s[6:7], 8 +; SI-NEXT: s_lshr_b32 s88, s5, 8 +; SI-NEXT: s_bfe_u32 s89, s72, 0x80008 +; SI-NEXT: s_lshr_b64 s[36:37], s[8:9], 16 +; SI-NEXT: s_mov_b32 s95, s79 +; SI-NEXT: s_mov_b32 s31, s78 +; SI-NEXT: s_lshr_b64 s[78:79], s[4:5], 24 +; SI-NEXT: s_and_b32 s90, s59, 0xffff +; SI-NEXT: s_mov_b32 s37, s89 +; SI-NEXT: s_mov_b32 s79, s88 +; SI-NEXT: s_lshr_b64 s[88:89], s[4:5], 16 +; SI-NEXT: s_bfe_u32 vcc_lo, s59, 0x80008 +; SI-NEXT: s_mov_b32 s89, s90 +; SI-NEXT: s_lshr_b64 s[90:91], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s60, s41, 8 +; SI-NEXT: s_lshr_b32 s87, s15, 8 +; SI-NEXT: s_lshr_b32 s82, s11, 8 +; SI-NEXT: s_lshr_b32 s71, s43, 8 +; SI-NEXT: s_lshr_b32 s68, s13, 8 +; SI-NEXT: s_lshr_b32 s73, s9, 8 +; SI-NEXT: s_and_b32 s74, s19, 0xffff +; SI-NEXT: s_and_b32 s98, s23, 0xffff +; SI-NEXT: s_and_b32 s84, s27, 0xffff +; SI-NEXT: s_and_b32 s80, s83, 0xffff +; SI-NEXT: s_and_b32 s69, s96, 0xffff +; SI-NEXT: s_and_b32 s75, s44, 0xffff +; SI-NEXT: s_bfe_u32 s47, s19, 0x80008 +; SI-NEXT: s_bfe_u32 s56, s23, 0x80008 +; SI-NEXT: s_bfe_u32 s86, s27, 0x80008 +; SI-NEXT: s_bfe_u32 s81, s83, 0x80008 +; SI-NEXT: s_bfe_u32 s70, s96, 0x80008 +; SI-NEXT: s_bfe_u32 s77, s44, 0x80008 +; SI-NEXT: s_lshr_b64 s[54:55], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[48:49], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[50:51], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[52:53], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[38:39], s[8:9], 8 +; SI-NEXT: s_mov_b32 s91, vcc_lo ; SI-NEXT: s_cbranch_execnz .LBB97_3 ; SI-NEXT: .LBB97_2: ; %cmp.true -; SI-NEXT: s_add_i32 s35, s35, 3 -; SI-NEXT: s_and_b32 s4, s35, 0xffff -; SI-NEXT: s_lshl_b32 s5, s34, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s31, s31, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s31, 0xffff -; SI-NEXT: s_lshl_b32 s5, s30, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s95, s95, 3 -; SI-NEXT: 
s_add_i32 s7, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s95, 0xffff -; SI-NEXT: s_lshl_b32 s5, s94, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s93, s93, 3 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s93, 0xffff -; SI-NEXT: s_lshl_b32 s5, s92, 16 +; SI-NEXT: s_add_i32 s17, s17, 3 +; SI-NEXT: s_and_b32 s4, s17, 0xffff +; SI-NEXT: s_lshl_b32 s5, s63, 16 +; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s59, 16 +; SI-NEXT: s_add_i32 s61, s61, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s61, 0xffff +; SI-NEXT: s_lshl_b32 s7, s57, 16 +; SI-NEXT: s_add_i32 s76, s76, 3 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s76, 0xffff +; SI-NEXT: s_lshl_b32 s8, s72, 16 +; SI-NEXT: s_add_i32 s45, s45, 3 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s8, s45, 0xffff +; SI-NEXT: s_lshl_b32 s9, s62, 16 +; SI-NEXT: s_add_i32 s58, s58, 3 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: s_and_b32 s9, s58, 0xffff +; SI-NEXT: s_lshl_b32 s10, s44, 16 +; SI-NEXT: s_add_i32 s46, s46, 3 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: s_and_b32 s10, s46, 0xffff +; SI-NEXT: s_lshl_b32 s11, s99, 16 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: s_add_i32 s97, s97, 3 +; SI-NEXT: s_add_i32 s12, s10, 0x30000 +; SI-NEXT: s_and_b32 s10, s97, 0xffff +; SI-NEXT: s_lshl_b32 s11, s96, 16 +; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s9, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: s_lshl_b32 s5, s29, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s91, s91, 3 -; SI-NEXT: s_add_i32 s10, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s91, 0xffff -; SI-NEXT: s_lshl_b32 s5, s90, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_add_i32 s13, s10, 0x30000 +; SI-NEXT: s_and_b32 s10, s28, 0xffff +; SI-NEXT: s_lshl_b32 s11, s29, 16 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: s_add_i32 s85, s85, 3 +; SI-NEXT: s_add_i32 
s42, s10, 0x30000 +; SI-NEXT: s_and_b32 s10, s85, 0xffff +; SI-NEXT: s_lshl_b32 s11, s83, 16 +; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s11, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: s_lshl_b32 s5, s25, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_add_i32 s43, s10, 0x30000 +; SI-NEXT: s_and_b32 s10, s24, 0xffff +; SI-NEXT: s_lshl_b32 s11, s25, 16 ; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_add_i32 s12, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: s_lshl_b32 s5, s27, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: s_and_b32 s11, s26, 0xffff +; SI-NEXT: s_lshl_b32 s14, s27, 16 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s13, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s11, s14, s11 +; SI-NEXT: s_and_b32 s14, s20, 0xffff +; SI-NEXT: s_lshl_b32 s15, s21, 16 ; SI-NEXT: s_add_i32 s22, s22, 3 -; SI-NEXT: s_add_i32 s14, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: s_lshl_b32 s5, s23, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: s_and_b32 s15, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s23, 16 ; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s15, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s40, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v15 -; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v11 -; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v7 -; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v3 -; SI-NEXT: s_add_i32 s41, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v6, s40 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 
-; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_alignbit_b32 v18, s41, v6, 24 -; SI-NEXT: v_alignbit_b32 v25, s41, v6, 16 -; SI-NEXT: v_alignbit_b32 v30, s41, v6, 8 -; SI-NEXT: v_mov_b32_e32 v6, s14 -; SI-NEXT: v_or_b32_e32 v1, v36, v1 -; SI-NEXT: v_or_b32_e32 v2, v35, v2 -; SI-NEXT: v_or_b32_e32 v4, v34, v4 -; SI-NEXT: v_or_b32_e32 v3, v33, v3 -; SI-NEXT: v_alignbit_b32 v19, s15, v6, 24 -; SI-NEXT: v_alignbit_b32 v26, s15, v6, 16 -; SI-NEXT: v_alignbit_b32 v31, s15, v6, 8 -; SI-NEXT: v_mov_b32_e32 v6, s12 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x30000, v1 -; SI-NEXT: v_mov_b32_e32 v15, s6 -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x30000, v2 -; SI-NEXT: v_mov_b32_e32 v10, s7 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x30000, v4 -; SI-NEXT: v_mov_b32_e32 v7, s8 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x30000, v3 -; SI-NEXT: v_mov_b32_e32 v3, s9 -; SI-NEXT: v_alignbit_b32 v17, s13, v6, 24 -; SI-NEXT: v_alignbit_b32 v23, s13, v6, 16 -; SI-NEXT: v_alignbit_b32 v29, s13, v6, 8 -; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: v_alignbit_b32 v16, s11, v6, 24 -; SI-NEXT: v_alignbit_b32 v20, s11, v6, 16 -; SI-NEXT: v_alignbit_b32 v27, s11, v6, 8 -; SI-NEXT: v_alignbit_b32 v9, v3, v5, 24 -; SI-NEXT: v_alignbit_b32 v12, v3, v5, 16 -; SI-NEXT: v_alignbit_b32 v21, v3, v5, 8 -; SI-NEXT: v_alignbit_b32 v6, v7, v4, 24 -; SI-NEXT: v_alignbit_b32 v8, v7, v4, 16 -; SI-NEXT: v_alignbit_b32 v13, v7, v4, 8 -; SI-NEXT: v_alignbit_b32 v24, v10, v2, 24 -; SI-NEXT: v_alignbit_b32 v28, v10, v2, 16 -; SI-NEXT: v_alignbit_b32 v32, v10, v2, 8 -; SI-NEXT: v_alignbit_b32 v10, v15, v1, 24 -; SI-NEXT: v_alignbit_b32 v14, v15, v1, 16 -; SI-NEXT: v_alignbit_b32 v22, v15, v1, 8 -; SI-NEXT: s_lshr_b32 s89, s41, 24 -; SI-NEXT: s_lshr_b32 s88, s41, 16 -; SI-NEXT: s_lshr_b32 s78, s41, 8 -; SI-NEXT: s_lshr_b32 s79, s15, 24 -; SI-NEXT: s_lshr_b32 s77, s15, 16 -; SI-NEXT: s_lshr_b32 s75, s15, 8 -; SI-NEXT: s_lshr_b32 s76, s13, 24 -; SI-NEXT: s_lshr_b32 s74, s13, 16 -; SI-NEXT: s_lshr_b32 s72, s13, 8 -; SI-NEXT: s_lshr_b32 
s73, s11, 24 -; SI-NEXT: s_lshr_b32 s63, s11, 16 -; SI-NEXT: s_lshr_b32 s61, s11, 8 -; SI-NEXT: s_lshr_b32 s62, s9, 24 -; SI-NEXT: s_lshr_b32 s60, s9, 16 -; SI-NEXT: s_lshr_b32 s58, s9, 8 -; SI-NEXT: s_lshr_b32 s59, s8, 24 -; SI-NEXT: s_lshr_b32 s57, s8, 16 -; SI-NEXT: s_lshr_b32 s47, s8, 8 -; SI-NEXT: s_lshr_b32 s56, s7, 24 -; SI-NEXT: s_lshr_b32 s46, s7, 16 -; SI-NEXT: s_lshr_b32 s45, s7, 8 -; SI-NEXT: s_lshr_b32 s44, s6, 24 -; SI-NEXT: s_lshr_b32 s43, s6, 16 -; SI-NEXT: s_lshr_b32 s42, s6, 8 +; SI-NEXT: s_or_b32 s15, s17, s15 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s31, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s40, s16, 0x30000 +; SI-NEXT: s_add_i32 s16, s93, 3 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_lshl_b32 s17, s19, 16 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: s_add_i32 s41, s16, 0x30000 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 24 +; SI-NEXT: v_writelane_b32 v21, s16, 4 +; SI-NEXT: v_writelane_b32 v21, s17, 5 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 16 +; SI-NEXT: v_writelane_b32 v21, s16, 2 +; SI-NEXT: v_writelane_b32 v21, s17, 3 +; SI-NEXT: s_lshr_b64 s[16:17], s[40:41], 8 +; SI-NEXT: s_add_i32 s14, s14, 0x30000 +; SI-NEXT: s_add_i32 s15, s15, 0x30000 +; SI-NEXT: v_writelane_b32 v21, s16, 0 +; SI-NEXT: v_writelane_b32 v21, s17, 1 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 24 +; SI-NEXT: v_writelane_b32 v21, s16, 10 +; SI-NEXT: v_writelane_b32 v21, s17, 11 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 16 +; SI-NEXT: v_writelane_b32 v21, s16, 8 +; SI-NEXT: v_writelane_b32 v21, s17, 9 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 8 +; SI-NEXT: s_add_i32 s10, s10, 0x30000 +; SI-NEXT: s_add_i32 s11, s11, 0x30000 +; SI-NEXT: v_writelane_b32 v21, s16, 6 +; SI-NEXT: v_writelane_b32 v21, s17, 7 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 24 +; SI-NEXT: v_writelane_b32 v21, s16, 16 +; SI-NEXT: v_writelane_b32 v21, s17, 17 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 16 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; 
SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_add_i32 s6, s6, 0x30000 +; SI-NEXT: s_add_i32 s7, s7, 0x30000 +; SI-NEXT: s_add_i32 s8, s8, 0x30000 +; SI-NEXT: s_add_i32 s9, s9, 0x30000 +; SI-NEXT: v_writelane_b32 v21, s16, 14 +; SI-NEXT: v_writelane_b32 v21, s17, 15 +; SI-NEXT: s_lshr_b64 s[16:17], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[34:35], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[36:37], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[88:89], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[4:5], 8 +; SI-NEXT: v_writelane_b32 v21, s16, 12 +; SI-NEXT: s_lshr_b64 s[54:55], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[64:65], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[66:67], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[48:49], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[50:51], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[52:53], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[38:39], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[92:93], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[30:31], s[6:7], 8 +; SI-NEXT: s_lshr_b32 s47, s41, 24 +; SI-NEXT: s_lshr_b32 s74, s41, 16 +; SI-NEXT: s_lshr_b32 s60, s41, 8 +; SI-NEXT: s_lshr_b32 s56, s15, 24 +; SI-NEXT: s_lshr_b32 s98, s15, 16 +; SI-NEXT: s_lshr_b32 s87, s15, 8 +; SI-NEXT: s_lshr_b32 s86, s11, 24 +; SI-NEXT: s_lshr_b32 s84, s11, 16 +; SI-NEXT: s_lshr_b32 s82, s11, 8 +; SI-NEXT: s_lshr_b32 s81, s43, 24 +; SI-NEXT: s_lshr_b32 s80, s43, 16 +; SI-NEXT: s_lshr_b32 s71, s43, 8 +; SI-NEXT: s_lshr_b32 s70, s13, 24 +; SI-NEXT: s_lshr_b32 s69, s13, 16 +; SI-NEXT: s_lshr_b32 s68, s13, 8 +; SI-NEXT: s_lshr_b32 s77, s9, 24 +; SI-NEXT: s_lshr_b32 s75, s9, 16 +; SI-NEXT: s_lshr_b32 s73, s9, 8 +; SI-NEXT: s_lshr_b32 s37, s7, 24 +; SI-NEXT: s_lshr_b32 s35, s7, 16 +; SI-NEXT: s_lshr_b32 s95, s7, 8 +; SI-NEXT: s_lshr_b32 s91, s5, 24 +; SI-NEXT: s_lshr_b32 s89, s5, 16 +; SI-NEXT: s_lshr_b32 s79, s5, 8 +; SI-NEXT: v_writelane_b32 v21, s17, 13 ; SI-NEXT: .LBB97_3: ; %end -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: 
v_lshlrev_b32_e32 v3, 8, v30 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s41, 0xff -; SI-NEXT: s_lshl_b32 s5, s78, 8 -; SI-NEXT: v_and_b32_e32 v7, 0xff, v25 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s88, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v18 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s16, s89, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s16, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s14, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v31 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s15, 0xff -; SI-NEXT: s_lshl_b32 s5, s75, 8 +; SI-NEXT: v_readlane_b32 s18, v21, 0 +; SI-NEXT: v_readlane_b32 s19, v21, 1 +; SI-NEXT: s_lshl_b32 s17, s18, 8 +; SI-NEXT: v_readlane_b32 s18, v21, 2 +; SI-NEXT: s_and_b32 s16, s40, 0xff +; SI-NEXT: v_readlane_b32 s19, v21, 3 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s18, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 4 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s18, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xff +; SI-NEXT: s_lshl_b32 s17, s60, 8 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: s_and_b32 s17, s74, 0xff +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: s_lshl_b32 s18, s47, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: v_readlane_b32 s16, v21, 6 +; SI-NEXT: s_and_b32 s14, s14, 
0xff +; SI-NEXT: v_readlane_b32 s17, v21, 7 +; SI-NEXT: s_lshl_b32 s16, s16, 8 +; SI-NEXT: v_readlane_b32 s19, v21, 5 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: v_readlane_b32 s16, v21, 8 +; SI-NEXT: v_readlane_b32 s17, v21, 9 +; SI-NEXT: s_and_b32 s16, s16, 0xff +; SI-NEXT: v_readlane_b32 s18, v21, 10 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s18, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v26 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s77, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v19 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s79, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s12, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v29 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: s_lshl_b32 s5, s72, 8 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: s_lshl_b32 s15, s87, 8 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: s_and_b32 s15, s98, 0xff +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: s_lshl_b32 s16, s56, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; 
SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: v_readlane_b32 s14, v21, 12 +; SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: v_readlane_b32 s15, v21, 13 +; SI-NEXT: s_lshl_b32 s14, s14, 8 +; SI-NEXT: s_or_b32 s10, s10, s14 +; SI-NEXT: v_readlane_b32 s14, v21, 14 +; SI-NEXT: v_readlane_b32 s15, v21, 15 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: v_readlane_b32 s16, v21, 16 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s16, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s10, s10, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v23 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s74, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v17 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s12, s76, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s12, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: s_lshl_b32 s11, s82, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s84, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s14, s86, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s14, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: s_and_b32 s4, s10, 0xff 
-; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v27 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: s_lshl_b32 s5, s61, 8 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s42, 0xff +; SI-NEXT: s_lshl_b32 s11, s66, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s64, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s14, s54, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s14, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v20 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s63, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v16 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s10, s73, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v7, v11, v7 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s10, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s43, 0xff +; SI-NEXT: s_lshl_b32 s11, s71, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s80, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s14, s81, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s14, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: buffer_store_dword v7, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v21 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s5, s58, 8 -; SI-NEXT: v_or_b32_e32 
v3, v3, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v12 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s60, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s12, 0xff +; SI-NEXT: s_lshl_b32 s11, s52, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s50, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s48, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v9 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s9, s62, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s9, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s13, 0xff +; SI-NEXT: s_lshl_b32 s11, s68, 8 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: s_and_b32 s11, s69, 0xff +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: s_lshl_b32 s12, s70, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s10, s38, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: s_and_b32 s10, s36, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s34, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 
offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: buffer_store_dword v5, v3, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v3, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v13 -; SI-NEXT: s_and_b32 s4, s8, 0xff -; SI-NEXT: s_lshl_b32 s5, s47, 8 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v8 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s57, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s73, 8 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_and_b32 s9, s75, 0xff +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_lshl_b32 s10, s77, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v6 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s8, s59, 24 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s8, s5 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_lshl_b32 s8, s30, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: s_and_b32 s8, s94, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s92, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; 
SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v32 -; SI-NEXT: s_and_b32 s4, s7, 0xff -; SI-NEXT: s_lshl_b32 s5, s45, 8 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v28 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s46, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: s_lshl_b32 s7, s95, 8 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_and_b32 s7, s35, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s8, s37, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v24 -; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s7, s56, 24 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s90, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s88, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s78, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s7, s5 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v0 -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v22 -; SI-NEXT: s_and_b32 s4, s6, 0xff -; SI-NEXT: s_lshl_b32 s5, s42, 8 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 
0xff, v14 +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s79, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s43, 0xff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v10 +; SI-NEXT: s_and_b32 s5, s89, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s44, 24 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: s_lshl_b32 s6, s91, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: v_readlane_b32 s19, v21, 11 +; SI-NEXT: v_readlane_b32 s17, v21, 17 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: v_readlane_b32 s35, v37, 3 -; SI-NEXT: v_readlane_b32 s34, v37, 2 -; SI-NEXT: v_readlane_b32 s31, v37, 1 -; SI-NEXT: v_readlane_b32 s30, v37, 0 +; SI-NEXT: v_readlane_b32 s99, v20, 35 +; SI-NEXT: v_readlane_b32 s98, v20, 34 +; SI-NEXT: v_readlane_b32 s97, v20, 33 +; SI-NEXT: v_readlane_b32 s96, v20, 32 +; SI-NEXT: v_readlane_b32 s87, v20, 31 +; SI-NEXT: v_readlane_b32 s86, v20, 30 +; SI-NEXT: v_readlane_b32 s85, v20, 29 +; SI-NEXT: v_readlane_b32 s84, v20, 28 +; SI-NEXT: v_readlane_b32 s83, v20, 27 +; SI-NEXT: v_readlane_b32 s82, v20, 26 +; SI-NEXT: v_readlane_b32 s81, v20, 25 +; SI-NEXT: v_readlane_b32 s80, v20, 24 +; SI-NEXT: v_readlane_b32 s71, v20, 23 +; SI-NEXT: v_readlane_b32 s70, v20, 22 +; SI-NEXT: v_readlane_b32 s69, v20, 21 +; SI-NEXT: v_readlane_b32 s68, v20, 20 +; SI-NEXT: v_readlane_b32 s67, v20, 19 +; SI-NEXT: v_readlane_b32 s66, v20, 18 +; SI-NEXT: v_readlane_b32 s65, v20, 17 +; 
SI-NEXT: v_readlane_b32 s64, v20, 16 +; SI-NEXT: v_readlane_b32 s55, v20, 15 +; SI-NEXT: v_readlane_b32 s54, v20, 14 +; SI-NEXT: v_readlane_b32 s53, v20, 13 +; SI-NEXT: v_readlane_b32 s52, v20, 12 +; SI-NEXT: v_readlane_b32 s51, v20, 11 +; SI-NEXT: v_readlane_b32 s50, v20, 10 +; SI-NEXT: v_readlane_b32 s49, v20, 9 +; SI-NEXT: v_readlane_b32 s48, v20, 8 +; SI-NEXT: v_readlane_b32 s39, v20, 7 +; SI-NEXT: v_readlane_b32 s38, v20, 6 +; SI-NEXT: v_readlane_b32 s37, v20, 5 +; SI-NEXT: v_readlane_b32 s36, v20, 4 +; SI-NEXT: v_readlane_b32 s35, v20, 3 +; SI-NEXT: v_readlane_b32 s34, v20, 2 +; SI-NEXT: v_readlane_b32 s31, v20, 1 +; SI-NEXT: v_readlane_b32 s30, v20, 0 ; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload ; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB97_4: +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v21, s4, 0 +; SI-NEXT: v_writelane_b32 v21, s5, 1 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $sgpr78 -; SI-NEXT: ; implicit-def: $sgpr88 -; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr47 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr87 +; SI-NEXT: ; implicit-def: $sgpr98 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: 
$sgpr82 +; SI-NEXT: ; implicit-def: $sgpr84 +; SI-NEXT: ; implicit-def: $sgpr86 +; SI-NEXT: ; implicit-def: $sgpr71 +; SI-NEXT: ; implicit-def: $sgpr80 +; SI-NEXT: ; implicit-def: $sgpr81 +; SI-NEXT: ; implicit-def: $sgpr68 +; SI-NEXT: ; implicit-def: $sgpr69 +; SI-NEXT: ; implicit-def: $sgpr70 +; SI-NEXT: ; implicit-def: $sgpr73 ; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr95 +; SI-NEXT: ; implicit-def: $sgpr35 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr91 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr66 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr54 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $sgpr10 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr48 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; 
implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: v_writelane_b32 v21, s4, 2 +; SI-NEXT: v_writelane_b32 v21, s5, 3 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 4 +; SI-NEXT: v_writelane_b32 v21, s5, 5 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 6 +; SI-NEXT: v_writelane_b32 v21, s5, 7 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 8 +; SI-NEXT: v_writelane_b32 v21, s5, 9 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 10 +; SI-NEXT: v_writelane_b32 v21, s5, 11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 12 +; SI-NEXT: v_writelane_b32 v21, s5, 13 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 14 +; SI-NEXT: v_writelane_b32 v21, s5, 15 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: v_writelane_b32 v21, s4, 16 +; SI-NEXT: v_writelane_b32 v21, s5, 17 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB97_2 ; ; VI-LABEL: bitcast_v32i16_to_v64i8_scalar: @@ -68563,433 +69064,541 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> 
inreg %a, i32 ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16 -; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v46, v30 ; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:76 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:32 -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:28 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24 -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:20 -; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:40 -; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:60 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:24 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:72 -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:68 -; SI-NEXT: v_readfirstlane_b32 s15, v27 -; SI-NEXT: v_readfirstlane_b32 s40, v26 -; SI-NEXT: 
v_readfirstlane_b32 s12, v19 -; SI-NEXT: v_readfirstlane_b32 s13, v18 -; SI-NEXT: v_readfirstlane_b32 s10, v11 -; SI-NEXT: v_readfirstlane_b32 s11, v10 -; SI-NEXT: v_readfirstlane_b32 s8, v3 -; SI-NEXT: v_readfirstlane_b32 s9, v2 -; SI-NEXT: v_readfirstlane_b32 s7, v1 -; SI-NEXT: v_readfirstlane_b32 s6, v0 -; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7 -; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v9 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5 -; SI-NEXT: v_lshlrev_b32_e32 v49, 8, v15 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17 -; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v13 -; SI-NEXT: v_lshlrev_b32_e32 v53, 8, v23 -; SI-NEXT: v_lshlrev_b32_e32 v52, 24, v25 -; SI-NEXT: v_lshlrev_b32_e32 v54, 24, v21 -; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v29 +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:56 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:72 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:68 +; SI-NEXT: v_readfirstlane_b32 s43, v1 +; SI-NEXT: v_readfirstlane_b32 s42, v0 +; SI-NEXT: v_lshlrev_b32_e32 v42, 8, v3 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v5 +; SI-NEXT: v_lshlrev_b32_e32 v40, 8, v11 +; SI-NEXT: v_lshlrev_b32_e32 v53, 24, v13 +; SI-NEXT: 
v_lshlrev_b32_e32 v43, 8, v19 +; SI-NEXT: v_lshlrev_b32_e32 v52, 24, v21 +; SI-NEXT: v_lshlrev_b32_e32 v59, 8, v27 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v29 +; SI-NEXT: v_lshlrev_b32_e32 v47, 8, v7 +; SI-NEXT: v_lshlrev_b32_e32 v45, 24, v9 +; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v15 +; SI-NEXT: v_lshlrev_b32_e32 v56, 24, v17 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v23 +; SI-NEXT: v_lshlrev_b32_e32 v41, 24, v25 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_readfirstlane_b32 s44, v31 -; SI-NEXT: v_readfirstlane_b32 s45, v32 -; SI-NEXT: v_readfirstlane_b32 s42, v33 -; SI-NEXT: v_readfirstlane_b32 s43, v34 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v35 -; SI-NEXT: v_lshlrev_b32_e32 v43, 8, v36 -; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_lshlrev_b32_e32 v42, 24, v37 +; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v38 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_lshlrev_b32_e32 v56, 8, v38 +; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36 +; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v48 +; SI-NEXT: s_waitcnt vmcnt(12) +; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v39 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_lshlrev_b32_e32 v61, 8, v37 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v39 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v49 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v48 +; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v30 ; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v59 +; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v31 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_lshlrev_b32_e32 v61, 24, v61 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v62 +; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v33 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_lshlrev_b32_e32 v29, 24, v34 +; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:188 ; 
4-byte Folded Spill +; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB99_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v2 +; SI-NEXT: v_or_b32_e32 v0, v0, v42 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_or_b32_e32 v0, v18, v0 +; SI-NEXT: v_or_b32_e32 v37, v1, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v10 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v12 +; SI-NEXT: v_or_b32_e32 v1, v1, v40 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(10) expcnt(0) +; SI-NEXT: v_mov_b32_e32 v60, v44 +; SI-NEXT: v_or_b32_e32 v44, v53, v9 +; SI-NEXT: v_or_b32_e32 v33, v1, v44 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v9, 0xff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v15, v46 +; SI-NEXT: v_or_b32_e32 v46, v52, v9 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_or_b32_e32 v55, v3, v9 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v63 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v27, v13 +; SI-NEXT: v_mov_b32_e32 v58, v8 +; SI-NEXT: 
v_mov_b32_e32 v49, v45 +; SI-NEXT: v_mov_b32_e32 v36, v24 +; SI-NEXT: v_mov_b32_e32 v34, v26 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s14, s23, 24 +; SI-NEXT: s_lshl_b32 s6, s19, 24 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s14, s5 -; SI-NEXT: s_or_b32 s41, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 -; SI-NEXT: s_lshl_b32 s14, s19, 24 -; SI-NEXT: s_or_b32 s4, s14, s4 -; SI-NEXT: s_and_b32 s14, s28, 0xff -; SI-NEXT: s_lshl_b32 s46, s29, 8 -; SI-NEXT: s_or_b32 s14, s14, s46 -; SI-NEXT: s_and_b32 s46, s6, 0xff -; SI-NEXT: s_lshl_b32 s46, s46, 16 -; SI-NEXT: s_lshl_b32 s47, s7, 24 -; SI-NEXT: s_or_b32 s57, s47, s46 -; SI-NEXT: s_and_b32 s46, s26, 0xff -; SI-NEXT: s_lshl_b32 s46, s46, 16 -; SI-NEXT: s_lshl_b32 s47, s27, 24 -; SI-NEXT: s_or_b32 s46, s47, s46 -; SI-NEXT: s_and_b32 s47, s16, 0xff -; SI-NEXT: s_lshl_b32 s56, s17, 8 -; SI-NEXT: s_or_b32 s47, s47, s56 -; SI-NEXT: s_and_b32 s47, s47, 0xffff -; SI-NEXT: v_mov_b32_e32 v1, s4 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v6 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v8 -; SI-NEXT: s_or_b32 s47, s47, s4 +; SI-NEXT: s_or_b32 s12, s6, s5 +; SI-NEXT: s_or_b32 s6, s4, s12 ; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s56, s25, 8 -; SI-NEXT: v_or_b32_e32 v9, v9, v0 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_or_b32_e32 v11, v2, v10 +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s26, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s7, s27, 24 +; SI-NEXT: s_or_b32 s14, s7, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s7, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s7 +; SI-NEXT: s_and_b32 s7, s5, 0xffff +; SI-NEXT: s_and_b32 s5, s22, 
0xff +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_or_b32 s5, s8, s5 +; SI-NEXT: s_or_b32 s13, s7, s5 +; SI-NEXT: s_lshr_b64 s[8:9], s[12:13], 16 +; SI-NEXT: s_and_b32 s7, s28, 0xff +; SI-NEXT: s_lshl_b32 s9, s29, 8 +; SI-NEXT: s_or_b32 s7, s7, s9 +; SI-NEXT: s_and_b32 s9, s7, 0xffff +; SI-NEXT: s_and_b32 s7, s42, 0xff +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_lshl_b32 s10, s43, 24 +; SI-NEXT: s_or_b32 s7, s10, s7 +; SI-NEXT: s_or_b32 s15, s9, s7 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_mov_b32_e32 v5, s46 -; SI-NEXT: v_or_b32_e32 v10, v9, v11 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v4 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v14 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v16 -; SI-NEXT: s_or_b32 s46, s4, s46 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s56, s8, 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[14:15], 16 +; SI-NEXT: s_or_b32 s4, s4, s14 +; SI-NEXT: v_mov_b32_e32 v39, v32 +; SI-NEXT: s_lshr_b32 s9, s5, 16 +; SI-NEXT: s_lshr_b32 s11, s7, 16 +; SI-NEXT: s_mov_b32 s7, s13 +; SI-NEXT: s_mov_b32 s5, s15 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v43 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v48, v1, v46 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v26 +; SI-NEXT: v_or_b32_e32 v1, v1, v59 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v35, v1, v55 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v50 +; SI-NEXT: v_or_b32_e32 v1, v1, v17 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v43, v13, v9 +; SI-NEXT: v_or_b32_e32 v50, v1, v43 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v54 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v51 +; SI-NEXT: v_or_b32_e32 v1, v1, v19 ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v13, v13, v49 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_or_b32_e32 v15, v3, v9 -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; 
SI-NEXT: v_or_b32_e32 v19, v7, v17 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v36, v13, v19 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v12 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v22 -; SI-NEXT: v_and_b32_e32 v18, 0xff, v24 -; SI-NEXT: v_and_b32_e32 v32, 0xff, v55 -; SI-NEXT: v_or_b32_e32 v35, s4, v15 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: s_lshl_b32 s56, s10, 8 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v17, v17, v53 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_or_b32_e32 v23, v51, v13 -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 -; SI-NEXT: v_or_b32_e32 v27, v52, v18 -; SI-NEXT: v_or_b32_e32 v62, v47, v32 -; SI-NEXT: v_and_b32_e32 v32, 0xff, v41 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v18, v17, v27 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v20 -; SI-NEXT: v_and_b32_e32 v21, 0xff, v30 -; SI-NEXT: v_and_b32_e32 v26, 0xff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: v_or_b32_e32 v37, s4, v23 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: s_lshl_b32 s56, s12, 8 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v21, v21, v43 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26 -; SI-NEXT: v_or_b32_e32 v33, v58, v32 -; SI-NEXT: v_and_b32_e32 v32, 0xff, v45 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v34, 0xff, v46 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_or_b32_e32 v25, v54, v17 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v31, v42, v26 -; SI-NEXT: v_or_b32_e32 v32, v32, v60 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v38, v21, v31 -; SI-NEXT: v_and_b32_e32 v21, 0xff, v28 -; SI-NEXT: v_and_b32_e32 v32, 0xffff, v32 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_mov_b32_e32 v13, v4 +; SI-NEXT: v_mov_b32_e32 v4, v2 +; SI-NEXT: v_mov_b32_e32 v2, v59 +; SI-NEXT: v_mov_b32_e32 v59, v3 +; SI-NEXT: 
v_mov_b32_e32 v3, v63 +; SI-NEXT: v_mov_b32_e32 v63, v40 +; SI-NEXT: v_mov_b32_e32 v40, v42 +; SI-NEXT: v_or_b32_e32 v42, v11, v9 +; SI-NEXT: v_or_b32_e32 v54, v1, v42 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v6 +; SI-NEXT: v_and_b32_e32 v9, 0xff, v8 +; SI-NEXT: v_or_b32_e32 v1, v1, v47 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v11, v45, v9 +; SI-NEXT: v_or_b32_e32 v1, v1, v11 +; SI-NEXT: v_mov_b32_e32 v19, v10 +; SI-NEXT: v_lshr_b64 v[9:10], v[0:1], 16 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v0, 0xff, v14 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_or_b32_e32 v63, v59, v34 -; SI-NEXT: v_or_b32_e32 v39, s4, v25 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s56, s15, 8 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v48, v32, v63 -; SI-NEXT: v_and_b32_e32 v32, 0xff, v57 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_or_b32_e32 v29, v44, v21 -; SI-NEXT: v_and_b32_e32 v26, 0xff, v40 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v26, v26, v56 -; SI-NEXT: v_or_b32_e32 v34, v61, v32 -; SI-NEXT: v_or_b32_e32 v32, s4, v29 -; SI-NEXT: s_and_b32 s4, s43, 0xff -; SI-NEXT: s_lshl_b32 s56, s42, 8 -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v26 -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: v_or_b32_e32 v26, v26, v62 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_alignbit_b32 v17, v18, v25, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v33, 16 -; SI-NEXT: v_or_b32_e32 v33, s4, v33 -; SI-NEXT: s_and_b32 s4, s45, 0xff -; SI-NEXT: s_lshl_b32 s56, s44, 8 -; SI-NEXT: s_and_b32 s14, s14, 0xffff -; SI-NEXT: s_or_b32 s4, s4, s56 -; SI-NEXT: s_or_b32 s14, s14, s57 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_alignbit_b32 v1, s41, v1, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; 
SI-NEXT: v_alignbit_b32 v9, v10, v15, 16 -; SI-NEXT: v_alignbit_b32 v13, v36, v23, 16 -; SI-NEXT: v_alignbit_b32 v21, v38, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, v48, v34, 16 -; SI-NEXT: v_or_b32_e32 v34, s4, v34 -; SI-NEXT: s_lshr_b32 s56, s5, 16 -; SI-NEXT: s_lshr_b32 s57, s57, 16 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v16 +; SI-NEXT: v_or_b32_e32 v0, v0, v57 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v23, v56, v10 +; SI-NEXT: v_mov_b32_e32 v8, v6 +; SI-NEXT: v_mov_b32_e32 v6, v14 +; SI-NEXT: v_or_b32_e32 v45, v0, v23 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v22 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v24 +; SI-NEXT: v_lshr_b64 v[9:10], v[44:45], 16 +; SI-NEXT: v_or_b32_e32 v0, v0, v5 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v14, v41, v14 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v24, v17 +; SI-NEXT: v_mov_b32_e32 v17, v47 +; SI-NEXT: v_or_b32_e32 v47, v0, v14 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[9:10], v[46:47], 16 +; SI-NEXT: v_mov_b32_e32 v46, v15 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v46 +; SI-NEXT: v_and_b32_e32 v15, 0xff, v62 +; SI-NEXT: v_or_b32_e32 v0, v0, v61 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v31, v7, v15 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v9, v61 +; SI-NEXT: v_mov_b32_e32 v61, v7 +; SI-NEXT: v_mov_b32_e32 v7, v5 +; SI-NEXT: v_mov_b32_e32 v5, v52 +; SI-NEXT: v_mov_b32_e32 v52, v41 +; SI-NEXT: v_mov_b32_e32 v41, v62 +; SI-NEXT: v_mov_b32_e32 v62, v57 +; SI-NEXT: v_mov_b32_e32 v57, v53 +; SI-NEXT: 
v_mov_b32_e32 v53, v56 +; SI-NEXT: v_or_b32_e32 v56, v0, v31 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v51, v22 +; SI-NEXT: v_lshr_b64 v[21:22], v[55:56], 16 ; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v27 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v15, 0xff, v10 +; SI-NEXT: v_or_b32_e32 v0, v0, v25 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v22, v30, v15 +; SI-NEXT: v_or_b32_e32 v44, v0, v22 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v32 +; SI-NEXT: v_and_b32_e32 v15, 0xff, v60 +; SI-NEXT: v_or_b32_e32 v0, v0, v38 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v38, v29, v15 +; SI-NEXT: v_lshr_b64 v[25:26], v[43:44], 16 +; SI-NEXT: v_or_b32_e32 v43, v0, v38 +; SI-NEXT: v_mov_b32_e32 v0, v30 +; SI-NEXT: v_lshr_b64 v[29:30], v[42:43], 16 +; SI-NEXT: v_mov_b32_e32 v42, v40 +; SI-NEXT: v_mov_b32_e32 v40, v63 +; SI-NEXT: v_mov_b32_e32 v63, v3 +; SI-NEXT: v_mov_b32_e32 v3, v59 +; SI-NEXT: v_mov_b32_e32 v59, v2 +; SI-NEXT: v_mov_b32_e32 v10, v19 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v13 +; SI-NEXT: v_mov_b32_e32 v13, v27 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v22 +; SI-NEXT: v_mov_b32_e32 v14, v6 +; SI-NEXT: v_mov_b32_e32 v6, v8 +; SI-NEXT: v_mov_b32_e32 v8, v58 +; SI-NEXT: v_mov_b32_e32 v22, v51 +; SI-NEXT: v_mov_b32_e32 v51, v44 +; SI-NEXT: v_mov_b32_e32 v44, v60 +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; 
SI-NEXT: v_mov_b32_e32 v55, v43 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v30, v0 +; SI-NEXT: v_mov_b32_e32 v26, v34 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v23 ; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v31 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v62 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v63 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v38 +; SI-NEXT: v_mov_b32_e32 v38, v1 +; SI-NEXT: v_mov_b32_e32 v34, v45 +; SI-NEXT: v_mov_b32_e32 v45, v49 +; SI-NEXT: v_mov_b32_e32 v49, v47 +; SI-NEXT: v_mov_b32_e32 v47, v17 +; SI-NEXT: v_mov_b32_e32 v17, v24 +; SI-NEXT: v_mov_b32_e32 v24, v36 +; SI-NEXT: v_mov_b32_e32 v36, v56 +; SI-NEXT: v_mov_b32_e32 v56, v53 +; SI-NEXT: v_mov_b32_e32 v53, v57 +; SI-NEXT: v_mov_b32_e32 v57, v62 +; SI-NEXT: v_mov_b32_e32 v62, v41 +; SI-NEXT: v_mov_b32_e32 v41, v52 +; SI-NEXT: v_mov_b32_e32 v52, v5 +; SI-NEXT: v_mov_b32_e32 v5, v7 +; SI-NEXT: v_mov_b32_e32 v7, v61 +; SI-NEXT: v_mov_b32_e32 v61, v9 ; SI-NEXT: s_cbranch_execnz .LBB99_3 ; SI-NEXT: .LBB99_2: ; %cmp.true -; SI-NEXT: s_add_i32 s45, s45, 3 -; SI-NEXT: s_and_b32 s4, s45, 0xff -; SI-NEXT: s_lshl_b32 s5, s44, 8 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v57 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload +; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_and_b32 s4, s24, 0xff +; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_add_i32 s26, s26, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: s_and_b32 s6, s26, 0xff ; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: s_lshl_b32 s5, s27, 24 +; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v61, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v34, 
vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v45 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_add_i32 s28, s28, 3 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s28, 0xff +; SI-NEXT: s_lshl_b32 s6, s29, 8 +; SI-NEXT: s_add_i32 s42, s42, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s7, s42, 0xff +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s43, 24 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s16, 0xff +; SI-NEXT: s_lshl_b32 s7, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s8, s18, 0xff +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_lshl_b32 s7, s19, 24 +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s8 +; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_and_b32 s7, s20, 0xff +; SI-NEXT: s_lshl_b32 s8, s21, 8 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_and_b32 s9, s22, 0xff +; SI-NEXT: s_addk_i32 s7, 0x300 +; SI-NEXT: s_lshl_b32 s8, s23, 24 +; SI-NEXT: s_lshl_b32 s9, s9, 16 +; SI-NEXT: s_and_b32 s7, s7, 0xffff +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: s_or_b32 s7, s8, s7 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_add_i32 s6, s6, 0x3000000 +; SI-NEXT: s_add_i32 s7, s7, 0x3000000 +; SI-NEXT: s_lshr_b64 s[8:9], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b32 s9, s7, 16 +; SI-NEXT: s_lshr_b32 s11, s5, 16 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 
4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v1, v9, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v54, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v39 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v44 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v46 -; SI-NEXT: v_or_b32_e32 v1, v60, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v59, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s43, s43, 3 -; SI-NEXT: v_add_i32_e32 v48, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s43, 0xff -; SI-NEXT: s_lshl_b32 s5, s42, 8 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v41 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v1, v9, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v55, vcc, 0x3000000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v58, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v33, vcc, 0x3000000, 
v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v40 +; SI-NEXT: v_or_b32_e32 v1, v13, v1 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v55 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v17, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v50, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v58 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v55 -; SI-NEXT: v_or_b32_e32 v1, v56, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v47, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s40, s40, 3 -; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s40, 0xff -; SI-NEXT: s_lshl_b32 s5, s15, 8 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v30, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v51, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v26 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v28 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v0, v59, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v44, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v32, vcc, 0x3000000, v1 -; SI-NEXT: 
v_add_i32_e32 v1, vcc, 3, v30 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v35, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v46 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v62 +; SI-NEXT: v_or_b32_e32 v0, v61, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v50 -; SI-NEXT: v_or_b32_e32 v1, v43, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v42, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: v_add_i32_e32 v38, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s13, 0xff -; SI-NEXT: s_lshl_b32 s5, s12, 8 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v7, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v36, vcc, 0x3000000, v0 +; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v54, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v39, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22 +; SI-NEXT: v_or_b32_e32 v1, v52, v1 +; SI-NEXT: v_lshr_b64 v[25:26], v[50:51], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[54:55], 16 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v36 +; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v51 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_or_b32_e32 v0, v43, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; 
SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v48, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v22 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v24 +; SI-NEXT: v_or_b32_e32 v0, v5, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v24 -; SI-NEXT: v_or_b32_e32 v1, v53, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v52, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s11, 0xff -; SI-NEXT: s_lshl_b32 s5, s10, 8 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v41, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v49, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v10 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v12 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v0, v40, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v51, v1 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: v_add_i32_e32 v37, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v14 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v53, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v33, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v14 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v16 +; SI-NEXT: v_or_b32_e32 v0, v57, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v16 -; 
SI-NEXT: v_or_b32_e32 v1, v49, v1 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 -; SI-NEXT: v_or_b32_e32 v1, v5, v1 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: v_add_i32_e32 v36, vcc, 0x3000000, v1 -; SI-NEXT: s_and_b32 s4, s9, 0xff -; SI-NEXT: s_lshl_b32 s5, s8, 8 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v56, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v34, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v2 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v4 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_or_b32_e32 v0, v42, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 +; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: v_or_b32_e32 v1, v3, v1 -; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s5, s25, 8 -; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s8, s26, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s27, 24 -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s46, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s28, 0xff -; SI-NEXT: s_lshl_b32 s5, s29, 8 -; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s7, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: 
s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s14, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s16, 0xff -; SI-NEXT: s_lshl_b32 s5, s17, 8 -; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s18, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s19, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 -; SI-NEXT: v_add_i32_e32 v35, vcc, 0x3000000, v1 -; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v6 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: v_or_b32_e32 v0, v0, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; SI-NEXT: v_or_b32_e32 v1, v18, v1 +; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_add_i32_e32 v37, vcc, 0x3000000, v0 +; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v6 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 ; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v8 -; SI-NEXT: s_add_i32 s47, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 -; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: v_or_b32_e32 v0, v47, v0 ; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_and_b32 s6, s22, 0xff ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 -; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: v_or_b32_e32 v1, v45, v1 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0 -; SI-NEXT: s_add_i32 s41, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s47 -; SI-NEXT: v_alignbit_b32 v1, s41, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s46 -; SI-NEXT: v_alignbit_b32 v5, s14, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, v10, v35, 16 -; SI-NEXT: v_alignbit_b32 v13, v36, v37, 16 -; SI-NEXT: 
v_alignbit_b32 v17, v18, v39, 16 -; SI-NEXT: v_alignbit_b32 v21, v38, v32, 16 -; SI-NEXT: v_alignbit_b32 v25, v26, v33, 16 -; SI-NEXT: v_alignbit_b32 v29, v48, v34, 16 -; SI-NEXT: s_lshr_b32 s56, s41, 16 -; SI-NEXT: s_lshr_b32 s57, s14, 16 -; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v36 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v38 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v26 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v48 -; SI-NEXT: .LBB99_3: ; %end +; SI-NEXT: v_add_i32_e32 v38, vcc, 0x3000000, v0 +; SI-NEXT: v_lshr_b64 v[0:1], v[37:38], 16 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[0:1], v[33:34], 16 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[0:1], v[48:49], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[35:36], 16 +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v38 +; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v34 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v49 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: .LBB99_3: ; %end +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:164 ; 4-byte Folded 
Reload ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload @@ -69006,55 +69615,62 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32 ; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_mov_b32_e32 v0, s47 -; SI-NEXT: v_mov_b32_e32 v2, s41 -; SI-NEXT: v_mov_b32_e32 v3, s56 -; SI-NEXT: v_mov_b32_e32 v4, s46 -; SI-NEXT: v_mov_b32_e32 v6, s14 -; SI-NEXT: v_mov_b32_e32 v7, s57 -; SI-NEXT: v_mov_b32_e32 v8, v35 -; SI-NEXT: v_mov_b32_e32 v12, v37 -; SI-NEXT: v_mov_b32_e32 v14, v36 -; SI-NEXT: v_mov_b32_e32 v16, v39 -; SI-NEXT: v_mov_b32_e32 v20, v32 -; SI-NEXT: v_mov_b32_e32 v22, v38 -; SI-NEXT: v_mov_b32_e32 v24, v33 -; SI-NEXT: v_mov_b32_e32 v28, v34 -; SI-NEXT: v_mov_b32_e32 v30, v48 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v0, s6 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s7 +; SI-NEXT: v_mov_b32_e32 v3, s9 +; SI-NEXT: v_mov_b32_e32 v4, s4 +; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v6, s5 +; SI-NEXT: v_mov_b32_e32 v7, s11 +; SI-NEXT: v_mov_b32_e32 v8, v37 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_mov_b32_e32 v10, v38 +; SI-NEXT: v_mov_b32_e32 v12, v33 +; SI-NEXT: v_mov_b32_e32 v14, v34 +; SI-NEXT: v_mov_b32_e32 v16, v48 +; SI-NEXT: v_mov_b32_e32 v18, v49 +; SI-NEXT: v_mov_b32_e32 v20, v35 +; SI-NEXT: v_mov_b32_e32 v22, v36 +; SI-NEXT: v_mov_b32_e32 v24, v50 +; SI-NEXT: v_mov_b32_e32 v26, v51 +; SI-NEXT: v_mov_b32_e32 v28, v54 +; SI-NEXT: v_mov_b32_e32 v30, v55 ; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: 
.LBB99_4: -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: v_mov_b32_e32 v39, v32 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill ; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; 
implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: s_branch .LBB99_2 ; @@ -76266,716 +76882,611 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32 ; SI-LABEL: bitcast_v32f16_to_v64i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v22, s17 +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: 
s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v40, s30, 0 +; SI-NEXT: v_writelane_b32 v40, s31, 1 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v6 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v62, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v7 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v15 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v10, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v35, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v9, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v7, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s20 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v11, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v46, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v42, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v10, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s25 +; SI-NEXT: v_cvt_f16_f32_e32 
v14, s24 ; SI-NEXT: v_cvt_f16_f32_e32 v5, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v14, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s29 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v13, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v17, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s28 +; SI-NEXT: v_writelane_b32 v40, s34, 2 +; SI-NEXT: v_writelane_b32 v40, s35, 3 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19 +; SI-NEXT: v_writelane_b32 v40, s36, 4 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: v_writelane_b32 v40, s37, 5 ; SI-NEXT: s_cbranch_scc0 .LBB105_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v22 -; SI-NEXT: v_or_b32_e32 v37, v10, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v7 -; SI-NEXT: v_or_b32_e32 v32, v9, v8 -; SI-NEXT: v_alignbit_b32 v8, v32, v37, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v32, v37, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v32, v37, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v13 -; SI-NEXT: v_or_b32_e32 v24, v12, v8 -; 
SI-NEXT: v_lshlrev_b32_e32 v8, 16, v6 -; SI-NEXT: v_or_b32_e32 v23, v11, v8 -; SI-NEXT: v_alignbit_b32 v8, v23, v24, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v23, v24, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v23, v24, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v46 -; SI-NEXT: v_or_b32_e32 v18, v42, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v5 -; SI-NEXT: v_or_b32_e32 v19, v14, v8 -; SI-NEXT: v_alignbit_b32 v8, v19, v18, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v19, v18, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v19, v18, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v26 -; SI-NEXT: v_or_b32_e32 v16, v25, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v20 -; SI-NEXT: v_or_b32_e32 v17, v28, v8 -; SI-NEXT: v_alignbit_b32 v8, v17, v16, 24 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v17, v16, 16 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v8, v17, v16, 8 -; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v27 -; SI-NEXT: v_or_b32_e32 v15, v21, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v4 -; SI-NEXT: v_or_b32_e32 v14, v62, v8 -; 
SI-NEXT: v_lshlrev_b32_e32 v8, 16, v35 -; SI-NEXT: v_or_b32_e32 v12, v34, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v3 -; SI-NEXT: v_or_b32_e32 v13, v30, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v53 -; SI-NEXT: v_or_b32_e32 v10, v50, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v2 -; SI-NEXT: v_or_b32_e32 v11, v48, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v41 -; SI-NEXT: v_or_b32_e32 v9, v40, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v1 -; SI-NEXT: v_alignbit_b32 v22, v11, v10, 24 -; SI-NEXT: v_or_b32_e32 v8, v55, v8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v11, v10, 16 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v8, v9, 24 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v8, v9, 16 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v22, v8, v9, 8 -; SI-NEXT: v_alignbit_b32 v57, v14, v15, 24 -; SI-NEXT: v_alignbit_b32 v58, v14, v15, 16 -; SI-NEXT: v_alignbit_b32 v61, v14, v15, 8 -; SI-NEXT: v_alignbit_b32 v44, v13, v12, 24 -; SI-NEXT: v_alignbit_b32 v47, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v56, v13, v12, 8 -; SI-NEXT: v_alignbit_b32 v43, v11, v10, 8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v32 -; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v23 -; SI-NEXT: v_lshrrev_b32_e32 v36, 8, v19 -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v17 -; SI-NEXT: v_lshrrev_b32_e32 v28, 8, v14 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v13 -; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v11 -; SI-NEXT: v_lshrrev_b32_e32 v45, 8, v8 -; SI-NEXT: v_bfe_u32 v54, v7, 8, 8 -; SI-NEXT: v_bfe_u32 v51, v6, 8, 8 -; SI-NEXT: v_bfe_u32 v49, v5, 
8, 8 -; SI-NEXT: v_bfe_u32 v38, v20, 8, 8 -; SI-NEXT: v_bfe_u32 v33, v4, 8, 8 -; SI-NEXT: v_bfe_u32 v29, v3, 8, 8 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_bfe_u32 v22, v2, 8, 8 -; SI-NEXT: v_bfe_u32 v60, v1, 8, 8 +; SI-NEXT: v_readfirstlane_b32 s4, v21 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v9 +; SI-NEXT: s_or_b32 s18, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v7 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v8 +; SI-NEXT: s_or_b32 s19, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v12 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v11 +; SI-NEXT: s_or_b32 s16, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v6 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v10 +; SI-NEXT: s_or_b32 s17, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v15 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: s_or_b32 s14, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v5 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v13 +; SI-NEXT: s_or_b32 s15, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v17 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v16 +; SI-NEXT: s_or_b32 s12, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v20 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v22 +; SI-NEXT: s_or_b32 s13, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v25 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v24 +; SI-NEXT: s_or_b32 s10, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v4 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v23 +; SI-NEXT: s_or_b32 s11, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v28 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v27 +; SI-NEXT: s_or_b32 s8, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v3 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v26 +; SI-NEXT: s_or_b32 s9, s5, s4 +; SI-NEXT: 
v_readfirstlane_b32 s4, v33 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v31 +; SI-NEXT: s_or_b32 s6, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v2 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v29 +; SI-NEXT: s_or_b32 s7, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s4, v38 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s5, v37 +; SI-NEXT: s_lshr_b64 s[20:21], s[18:19], 24 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_readfirstlane_b32 s21, v35 +; SI-NEXT: s_lshr_b64 s[22:23], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[14:15], 16 +; SI-NEXT: s_or_b32 s5, s21, s5 +; SI-NEXT: s_lshr_b64 s[56:57], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[78:79], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[88:89], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s45, s19, 8 +; SI-NEXT: s_lshr_b32 s43, s17, 8 +; SI-NEXT: s_lshr_b32 s41, s15, 8 +; SI-NEXT: s_lshr_b32 s29, s13, 8 +; SI-NEXT: s_lshr_b32 s27, s11, 8 +; SI-NEXT: s_lshr_b32 s25, s9, 8 +; SI-NEXT: s_lshr_b32 s23, s7, 8 +; SI-NEXT: s_lshr_b32 s21, s5, 8 +; SI-NEXT: v_bfe_u32 v48, v7, 8, 8 +; SI-NEXT: v_bfe_u32 v39, v6, 8, 8 
+; SI-NEXT: v_bfe_u32 v36, v5, 8, 8 +; SI-NEXT: v_bfe_u32 v34, v20, 8, 8 +; SI-NEXT: v_bfe_u32 v32, v4, 8, 8 +; SI-NEXT: v_bfe_u32 v30, v3, 8, 8 +; SI-NEXT: v_bfe_u32 v19, v2, 8, 8 +; SI-NEXT: v_bfe_u32 v18, v1, 8, 8 ; SI-NEXT: s_cbranch_execnz .LBB105_3 ; SI-NEXT: .LBB105_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v18, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v41 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_cvt_f32_f16_e32 v9, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v37 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: v_cvt_f32_f16_e32 v10, v55 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_readfirstlane_b32 s4, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v35 +; SI-NEXT: v_readfirstlane_b32 s5, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v33 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v9, v9, v8 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_cvt_f32_f16_e32 v11, v53 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v1 -; SI-NEXT: v_or_b32_e32 v8, v10, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_cvt_f32_f16_e32 v12, v48 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v13, v35 -; SI-NEXT: v_add_f32_e32 
v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: v_readfirstlane_b32 s6, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v31 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: v_readfirstlane_b32 s5, v1 +; SI-NEXT: s_lshl_b32 s5, s5, 16 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: v_readfirstlane_b32 s6, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v28 +; SI-NEXT: v_readfirstlane_b32 s7, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v29 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v34 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v2 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: v_readfirstlane_b32 s8, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v27 +; SI-NEXT: v_readfirstlane_b32 s7, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_or_b32_e32 v11, v12, v11 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v30 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: s_lshl_b32 s7, s7, 16 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: s_or_b32 
s7, s8, s7 +; SI-NEXT: v_readfirstlane_b32 s8, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v25 +; SI-NEXT: v_readfirstlane_b32 s9, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v26 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v21 -; SI-NEXT: v_or_b32_e32 v12, v14, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v27 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_or_b32_e32 v13, v13, v15 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v62 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_readfirstlane_b32 s10, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: s_lshl_b32 s8, s8, 16 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: s_or_b32 s8, s9, s8 +; SI-NEXT: v_readfirstlane_b32 s9, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v15, v15, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v26 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v4 -; SI-NEXT: v_or_b32_e32 v14, v16, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v46 +; SI-NEXT: v_readfirstlane_b32 s11, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v23 +; SI-NEXT: s_lshl_b32 s9, s9, 16 ; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: s_or_b32 s9, s10, s9 +; SI-NEXT: 
v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_readfirstlane_b32 s10, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: s_lshl_b32 s10, s10, 16 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v42 +; SI-NEXT: s_or_b32 s10, s11, s10 +; SI-NEXT: v_readfirstlane_b32 s11, v4 +; SI-NEXT: s_lshl_b32 s11, s11, 16 +; SI-NEXT: v_readfirstlane_b32 s12, v18 +; SI-NEXT: s_or_b32 s11, s12, s11 +; SI-NEXT: v_readfirstlane_b32 s12, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v17, v20 +; SI-NEXT: v_readfirstlane_b32 s13, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v17 +; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 +; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: s_or_b32 s12, s13, s12 +; SI-NEXT: v_readfirstlane_b32 s13, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; 
SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v5 +; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: s_lshl_b32 s13, s13, 16 +; SI-NEXT: v_readfirstlane_b32 s14, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_readfirstlane_b32 s17, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v21 +; SI-NEXT: s_or_b32 s13, s14, s13 +; SI-NEXT: v_readfirstlane_b32 s14, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: v_readfirstlane_b32 s15, v14 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_or_b32_e32 v17, v18, v17 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v19 -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload -; SI-NEXT: v_or_b32_e32 v18, v21, v18 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: s_or_b32 s14, s15, s14 +; SI-NEXT: v_readfirstlane_b32 s15, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: s_lshl_b32 s15, s15, 16 +; SI-NEXT: v_readfirstlane_b32 s16, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_readfirstlane_b32 s16, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 ; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_readfirstlane_b32 s17, v6 ; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_alignbit_b32 v57, v14, v15, 24 -; SI-NEXT: v_alignbit_b32 v58, v14, v15, 16 -; SI-NEXT: v_alignbit_b32 v61, v14, 
v15, 8 -; SI-NEXT: v_alignbit_b32 v44, v13, v12, 24 -; SI-NEXT: v_alignbit_b32 v47, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v56, v13, v12, 8 -; SI-NEXT: v_alignbit_b32 v43, v11, v10, 8 -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v17 -; SI-NEXT: v_lshrrev_b32_e32 v28, 8, v14 -; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v13 -; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v11 -; SI-NEXT: v_lshrrev_b32_e32 v45, 8, v8 -; SI-NEXT: v_bfe_u32 v54, v7, 8, 8 -; SI-NEXT: v_bfe_u32 v51, v6, 8, 8 -; SI-NEXT: v_bfe_u32 v49, v5, 8, 8 -; SI-NEXT: v_bfe_u32 v38, v20, 8, 8 -; SI-NEXT: v_bfe_u32 v33, v4, 8, 8 -; SI-NEXT: v_bfe_u32 v29, v3, 8, 8 -; SI-NEXT: v_bfe_u32 v60, v1, 8, 8 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_or_b32_e32 v19, v19, v22 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v23 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_lshrrev_b32_e32 v36, 8, v19 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v24, v22, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v6 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_or_b32_e32 v23, v23, v22 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v23 -; SI-NEXT: 
s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_or_b32_e32 v37, v22, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v7 -; SI-NEXT: v_or_b32_e32 v32, v25, v21 -; SI-NEXT: v_alignbit_b32 v21, v32, v37, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v32, v37, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v32, v37, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v23, v24, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v23, v24, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v23, v24, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v19, v18, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v19, v18, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v19, v18, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v17, v16, 24 -; SI-NEXT: buffer_store_dword 
v21, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v17, v16, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v17, v16, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v11, v10, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v11, v10, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v8, v9, 24 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v8, v9, 16 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v21, v8, v9, 8 -; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v32 -; SI-NEXT: v_bfe_u32 v22, v2, 8, 8 -; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: s_lshl_b32 s17, s17, 16 +; SI-NEXT: v_readfirstlane_b32 s18, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: s_or_b32 s17, s18, s17 +; SI-NEXT: v_readfirstlane_b32 s18, v11 +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: v_readfirstlane_b32 s19, v9 +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: v_readfirstlane_b32 s19, v7 +; SI-NEXT: s_lshl_b32 s19, s19, 16 +; SI-NEXT: v_readfirstlane_b32 s20, v8 +; SI-NEXT: s_or_b32 s19, s20, s19 +; SI-NEXT: s_lshr_b64 s[20:21], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[22:23], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[26:27], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[28:29], s[16:17], 16 +; 
SI-NEXT: s_lshr_b64 s[42:43], s[16:17], 8 +; SI-NEXT: s_lshr_b64 s[40:41], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[44:45], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 24 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[12:13], 8 +; SI-NEXT: s_lshr_b64 s[60:61], s[10:11], 24 +; SI-NEXT: s_lshr_b64 s[72:73], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[10:11], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[78:79], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[88:89], s[6:7], 24 +; SI-NEXT: s_lshr_b64 s[92:93], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[6:7], 8 +; SI-NEXT: s_lshr_b64 s[30:31], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[34:35], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[36:37], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s45, s19, 8 +; SI-NEXT: s_lshr_b32 s43, s17, 8 +; SI-NEXT: s_lshr_b32 s41, s15, 8 +; SI-NEXT: s_lshr_b32 s29, s13, 8 +; SI-NEXT: s_lshr_b32 s27, s11, 8 +; SI-NEXT: s_lshr_b32 s25, s9, 8 +; SI-NEXT: s_lshr_b32 s23, s7, 8 +; SI-NEXT: s_lshr_b32 s21, s5, 8 +; SI-NEXT: v_bfe_u32 v48, v7, 8, 8 +; SI-NEXT: v_bfe_u32 v39, v6, 8, 8 +; SI-NEXT: v_bfe_u32 v36, v5, 8, 8 +; SI-NEXT: v_bfe_u32 v34, v20, 8, 8 +; SI-NEXT: v_bfe_u32 v32, v4, 8, 8 +; SI-NEXT: v_bfe_u32 v30, v3, 8, 8 +; SI-NEXT: v_bfe_u32 v19, v2, 8, 8 +; SI-NEXT: v_bfe_u32 v18, v1, 8, 8 ; SI-NEXT: .LBB105_3: ; %end -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xff, v37 +; SI-NEXT: s_and_b32 s18, s18, 0xff +; SI-NEXT: s_lshl_b32 s26, s26, 8 +; SI-NEXT: s_and_b32 s22, s22, 0xff +; SI-NEXT: s_or_b32 s18, s18, s26 +; SI-NEXT: s_lshl_b32 s22, s22, 16 +; SI-NEXT: s_lshl_b32 s20, s20, 24 +; SI-NEXT: s_and_b32 s18, s18, 0xffff +; SI-NEXT: s_or_b32 s20, s20, s22 +; SI-NEXT: s_or_b32 s18, s18, s20 +; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: s_and_b32 s18, s19, 0xff +; 
SI-NEXT: s_lshl_b32 s19, s45, 8 ; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s18, s18, s19 ; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v25 -; SI-NEXT: v_or_b32_e32 v21, v21, v25 -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v26 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v25, 0xff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 -; SI-NEXT: v_or_b32_e32 v25, v26, v25 -; SI-NEXT: v_or_b32_e32 v21, v21, v25 -; SI-NEXT: buffer_store_dword v21, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v52 -; SI-NEXT: v_or_b32_e32 v21, v21, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v54 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v7, v25, v7 -; SI-NEXT: v_or_b32_e32 v7, v21, v7 -; SI-NEXT: v_add_i32_e32 v21, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v7, v21, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v48 +; SI-NEXT: s_and_b32 s18, s18, 0xffff +; SI-NEXT: v_or_b32_e32 v7, v8, v7 +; SI-NEXT: v_or_b32_e32 v7, s18, v7 +; SI-NEXT: 
s_and_b32 s16, s16, 0xff +; SI-NEXT: s_lshl_b32 s18, s42, 8 +; SI-NEXT: s_or_b32 s16, s16, s18 +; SI-NEXT: s_and_b32 s18, s28, 0xff +; SI-NEXT: s_lshl_b32 s18, s18, 16 +; SI-NEXT: s_lshl_b32 s19, s24, 24 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s18, s19, s18 +; SI-NEXT: v_add_i32_e32 v8, vcc, 4, v0 +; SI-NEXT: s_or_b32 s16, s16, s18 +; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v24 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21 -; SI-NEXT: v_or_b32_e32 v7, v7, v21 -; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v24 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v21, v24, v21 -; SI-NEXT: v_or_b32_e32 v7, v7, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v7, v21, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v39 -; SI-NEXT: v_or_b32_e32 v7, v7, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v51 -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: v_or_b32_e32 v6, v21, v6 +; SI-NEXT: v_add_i32_e32 v7, vcc, 8, v0 +; SI-NEXT: v_mov_b32_e32 v8, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xff +; SI-NEXT: s_lshl_b32 s17, s43, 8 +; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 +; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v39 +; SI-NEXT: s_and_b32 s16, s16, 0xffff ; SI-NEXT: v_or_b32_e32 v6, v7, v6 +; SI-NEXT: v_or_b32_e32 v6, s16, v6 +; SI-NEXT: s_and_b32 s14, s14, 0xff +; SI-NEXT: s_lshl_b32 s16, s56, 8 +; SI-NEXT: s_or_b32 
s14, s14, s16 +; SI-NEXT: s_and_b32 s16, s44, 0xff +; SI-NEXT: s_lshl_b32 s16, s16, 16 +; SI-NEXT: s_lshl_b32 s17, s40, 24 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 ; SI-NEXT: v_add_i32_e32 v7, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v18 -; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v18 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v18, v7 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 16, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v36 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v49 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_or_b32_e32 v5, v7, v5 +; SI-NEXT: v_add_i32_e32 v6, vcc, 16, v0 +; SI-NEXT: v_mov_b32_e32 v7, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xff +; SI-NEXT: s_lshl_b32 s15, s41, 8 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v5 +; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v36 +; SI-NEXT: s_and_b32 s14, s14, 0xffff ; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v5, s14, v5 +; SI-NEXT: s_and_b32 s12, s12, 0xff +; SI-NEXT: s_lshl_b32 s14, s62, 8 +; SI-NEXT: s_or_b32 s12, s12, s14 
+; SI-NEXT: s_and_b32 s14, s58, 0xff +; SI-NEXT: s_lshl_b32 s14, s14, 16 +; SI-NEXT: s_lshl_b32 s15, s46, 24 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_or_b32 s14, s15, s14 ; SI-NEXT: v_add_i32_e32 v6, vcc, 20, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 ; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v16 -; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v5, vcc, 24, v0 +; SI-NEXT: v_mov_b32_e32 v6, s12 +; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen +; SI-NEXT: s_and_b32 s12, s13, 0xff +; SI-NEXT: s_lshl_b32 s13, s29, 8 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v20 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v31 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v38 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v34 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: v_or_b32_e32 v5, s12, v5 +; 
SI-NEXT: s_and_b32 s10, s10, 0xff +; SI-NEXT: s_lshl_b32 s12, s76, 8 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: s_and_b32 s12, s72, 0xff +; SI-NEXT: s_lshl_b32 s12, s12, 16 +; SI-NEXT: s_lshl_b32 s13, s60, 24 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_or_b32 s12, s13, s12 ; SI-NEXT: v_add_i32_e32 v6, vcc, 28, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 ; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v61 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v58 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v57 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v6, v7, v6 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v28 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v33 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_or_b32_e32 v4, v6, v4 +; SI-NEXT: v_add_i32_e32 v5, vcc, 32, v0 +; SI-NEXT: v_mov_b32_e32 v6, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xff +; SI-NEXT: s_lshl_b32 s11, s27, 8 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v32 +; SI-NEXT: s_and_b32 s10, s10, 0xffff ; SI-NEXT: v_or_b32_e32 v4, v5, v4 +; SI-NEXT: v_or_b32_e32 v4, s10, v4 +; SI-NEXT: s_and_b32 s8, s8, 0xff +; SI-NEXT: s_lshl_b32 s10, s90, 8 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: s_and_b32 s10, s78, 0xff +; SI-NEXT: s_lshl_b32 s10, s10, 16 +; SI-NEXT: s_lshl_b32 s11, s74, 24 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_or_b32 s10, s11, s10 ; SI-NEXT: v_add_i32_e32 v5, vcc, 36, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 ; 
SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v56 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_and_b32_e32 v5, 0xff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v44 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v5, v6, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v63 -; SI-NEXT: v_or_b32_e32 v4, v4, v5 -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v29 -; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 -; SI-NEXT: v_or_b32_e32 v3, v5, v3 +; SI-NEXT: v_add_i32_e32 v4, vcc, 40, v0 +; SI-NEXT: v_mov_b32_e32 v5, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xff +; SI-NEXT: s_lshl_b32 s9, s25, 8 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v30 +; SI-NEXT: s_and_b32 s8, s8, 0xffff ; SI-NEXT: v_or_b32_e32 v3, v4, v3 +; SI-NEXT: v_or_b32_e32 v3, s8, v3 +; SI-NEXT: s_and_b32 s6, s6, 0xff +; SI-NEXT: s_lshl_b32 s8, s94, 8 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: s_and_b32 s8, s92, 0xff +; SI-NEXT: s_lshl_b32 s8, s8, 16 +; SI-NEXT: s_lshl_b32 s9, s88, 24 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s8, s9, s8 ; SI-NEXT: v_add_i32_e32 v4, vcc, 44, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 ; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v43 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: 
v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; SI-NEXT: v_or_b32_e32 v4, v5, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v59 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v22 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v2, v4, v2 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: v_mov_b32_e32 v4, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xff +; SI-NEXT: s_lshl_b32 s7, s23, 8 +; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 +; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v19 +; SI-NEXT: s_and_b32 s6, s6, 0xffff ; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: v_or_b32_e32 v2, s6, v2 +; SI-NEXT: s_and_b32 s4, s4, 0xff +; SI-NEXT: s_lshl_b32 s6, s36, 8 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: s_and_b32 s6, s34, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s30, 24 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v9 -; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: 
v_lshlrev_b32_e32 v4, 24, v4 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v4, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 ; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v45 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v60 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xff +; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v18 +; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_or_b32_e32 v1, s4, v1 ; SI-NEXT: v_add_i32_e32 v0, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload 
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: v_readlane_b32 s37, v40, 5 +; SI-NEXT: v_readlane_b32 s36, v40, 4 +; SI-NEXT: v_readlane_b32 s35, v40, 3 +; SI-NEXT: v_readlane_b32 s34, v40, 2 +; SI-NEXT: v_readlane_b32 s31, v40, 1 +; SI-NEXT: v_readlane_b32 s30, v40, 0 +; SI-NEXT: s_or_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB105_4: -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $sgpr18 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $sgpr45 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr43 ; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr44 +; 
SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr41 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr61 -; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: 
$vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; kill: killed $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr29 +; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr27 +; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr25 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr21 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $sgpr36 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: s_branch .LBB105_2 ; ; VI-LABEL: bitcast_v32f16_to_v64i8_scalar: @@ -85721,728 +86232,500 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a, ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mul_f32_e64 v19, 1.0, s17 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_mul_f32_e32 v62, 1.0, v2 -; SI-NEXT: v_mul_f32_e32 v23, 1.0, v1 -; SI-NEXT: v_mul_f32_e32 v38, 1.0, v4 -; SI-NEXT: v_mul_f32_e32 v48, 1.0, v3 -; SI-NEXT: v_mul_f32_e32 v34, 1.0, v6 -; SI-NEXT: v_mul_f32_e32 v37, 1.0, v5 -; SI-NEXT: v_mul_f32_e32 v54, 1.0, v8 -; SI-NEXT: v_mul_f32_e32 v40, 1.0, v7 -; SI-NEXT: v_mul_f32_e32 v51, 1.0, v10 -; SI-NEXT: v_mul_f32_e32 v53, 1.0, v9 -; SI-NEXT: 
v_mul_f32_e32 v44, 1.0, v12 -; SI-NEXT: v_mul_f32_e32 v45, 1.0, v11 -; SI-NEXT: v_mul_f32_e32 v42, 1.0, v14 -; SI-NEXT: v_mul_f32_e32 v43, 1.0, v13 -; SI-NEXT: v_mul_f32_e32 v57, 1.0, v16 -; SI-NEXT: v_mul_f32_e32 v58, 1.0, v15 -; SI-NEXT: v_mul_f32_e32 v47, 1.0, v18 -; SI-NEXT: v_mul_f32_e32 v56, 1.0, v17 +; SI-NEXT: v_mul_f32_e32 v33, 1.0, v2 +; SI-NEXT: v_mul_f32_e32 v27, 1.0, v1 +; SI-NEXT: v_mul_f32_e32 v50, 1.0, v4 +; SI-NEXT: v_mul_f32_e32 v52, 1.0, v3 +; SI-NEXT: v_mul_f32_e32 v39, 1.0, v6 +; SI-NEXT: v_mul_f32_e32 v49, 1.0, v5 +; SI-NEXT: v_mul_f32_e32 v44, 1.0, v8 +; SI-NEXT: v_mul_f32_e32 v46, 1.0, v7 +; SI-NEXT: v_mul_f32_e32 v40, 1.0, v10 +; SI-NEXT: v_mul_f32_e32 v43, 1.0, v9 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mul_f32_e32 v61, 1.0, v12 +; SI-NEXT: v_mul_f32_e32 v25, 1.0, v11 +; SI-NEXT: v_mul_f32_e32 v57, 1.0, v14 +; SI-NEXT: v_mul_f32_e32 v60, 1.0, v13 +; SI-NEXT: v_mul_f32_e32 v36, 1.0, v16 +; SI-NEXT: v_mul_f32_e32 v37, 1.0, v15 +; SI-NEXT: v_mul_f32_e32 v32, 1.0, v18 +; SI-NEXT: v_mul_f32_e32 v34, 1.0, v17 ; SI-NEXT: v_mul_f32_e64 v3, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v46, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v26, 1.0, s19 ; SI-NEXT: v_mul_f32_e64 v2, 1.0, s18 ; SI-NEXT: v_mul_f32_e64 v5, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v28, 1.0, s20 -; SI-NEXT: v_mul_f32_e64 v61, 1.0, s23 +; SI-NEXT: v_mul_f32_e64 v6, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v29, 1.0, s23 ; SI-NEXT: v_mul_f32_e64 v4, 1.0, s22 -; SI-NEXT: v_mul_f32_e64 v36, 1.0, s25 -; SI-NEXT: v_mul_f32_e64 v39, 1.0, s24 -; SI-NEXT: v_mul_f32_e64 v31, 1.0, s27 -; SI-NEXT: v_mul_f32_e64 v35, 1.0, s26 -; SI-NEXT: v_mul_f32_e64 v52, 1.0, s29 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s25 +; SI-NEXT: v_mul_f32_e64 v38, 1.0, s24 +; SI-NEXT: v_mul_f32_e64 v35, 1.0, s27 +; SI-NEXT: v_mul_f32_e64 v7, 1.0, s26 +; SI-NEXT: v_mul_f32_e64 v53, 1.0, s29 ; SI-NEXT: v_mul_f32_e64 v55, 1.0, s28 -; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v2, 
off, s[0:3], s32 offset:184 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill -; SI-NEXT: s_cbranch_scc0 .LBB109_2 +; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: s_cbranch_scc0 .LBB109_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v46 -; SI-NEXT: v_alignbit_b32 v27, v1, v3, 16 -; SI-NEXT: v_alignbit_b32 v30, v24, v2, 16 -; SI-NEXT: v_alignbit_b32 v1, v30, v27, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v30, v27, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v30, v27, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_alignbit_b32 v23, v1, v3, 16 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v61 -; SI-NEXT: v_alignbit_b32 v21, v1, v28, 16 -; 
SI-NEXT: v_alignbit_b32 v19, v17, v4, 16 -; SI-NEXT: v_alignbit_b32 v1, v19, v21, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v19, v21, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v19, v21, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v36 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v31 -; SI-NEXT: v_alignbit_b32 v15, v1, v39, 16 -; SI-NEXT: v_alignbit_b32 v16, v13, v35, 16 -; SI-NEXT: v_alignbit_b32 v1, v16, v15, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v16, v15, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v16, v15, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v52 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v62 -; SI-NEXT: v_alignbit_b32 v10, v1, v55, 16 -; SI-NEXT: v_alignbit_b32 v11, v9, v23, 16 -; SI-NEXT: v_alignbit_b32 v1, v11, v10, 24 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v11, v10, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v1, v11, v10, 8 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v38 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v34 -; SI-NEXT: v_alignbit_b32 v6, v1, v48, 16 -; SI-NEXT: v_alignbit_b32 v20, v18, v37, 16 
-; SI-NEXT: v_alignbit_b32 v4, v20, v6, 24 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v54 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v20, v6, 16 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v51 -; SI-NEXT: v_alignbit_b32 v3, v1, v40, 16 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v20, v6, 8 -; SI-NEXT: v_alignbit_b32 v14, v12, v53, 16 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v14, v3, 24 +; SI-NEXT: v_alignbit_b32 v20, v1, v6, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8 +; SI-NEXT: v_alignbit_b32 v17, v1, v38, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v53 +; SI-NEXT: v_alignbit_b32 v14, v1, v55, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v50 +; SI-NEXT: v_alignbit_b32 v11, v1, v52, 16 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v44 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v14, v3, 16 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v42 -; SI-NEXT: v_alignbit_b32 v2, v1, v45, 16 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v14, v3, 8 -; SI-NEXT: v_alignbit_b32 v8, v7, v43, 16 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v8, v2, 24 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v8, v2, 16 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v4, v8, v2, 8 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, 
v57 -; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v47 -; SI-NEXT: v_alignbit_b32 v1, v1, v58, 16 -; SI-NEXT: v_alignbit_b32 v5, v4, v56, 16 -; SI-NEXT: v_alignbit_b32 v25, v5, v1, 24 -; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v14 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v5, v1, 16 -; SI-NEXT: v_lshrrev_b32_e32 v22, 8, v11 -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v8 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v5, v1, 8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v22, 24, v34 -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v29 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v47 -; SI-NEXT: s_mov_b64 s[4:5], 0 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v46 -; SI-NEXT: v_lshrrev_b32_e32 v50, 8, v30 -; SI-NEXT: v_lshrrev_b32_e32 v49, 24, v61 -; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v19 -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v31 +; SI-NEXT: v_alignbit_b32 v8, v1, v46, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v61 +; SI-NEXT: v_alignbit_b32 v21, v19, v4, 16 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v35 +; SI-NEXT: v_alignbit_b32 v4, v1, v25, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v36 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v26 +; SI-NEXT: v_alignbit_b32 v18, v16, v7, 16 +; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v39 +; SI-NEXT: 
v_lshrrev_b32_e32 v7, 16, v40 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v57 +; SI-NEXT: v_alignbit_b32 v3, v1, v37, 16 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v32 +; SI-NEXT: v_alignbit_b32 v24, v22, v2, 16 +; SI-NEXT: v_alignbit_b32 v15, v13, v27, 16 +; SI-NEXT: v_alignbit_b32 v12, v10, v49, 16 +; SI-NEXT: v_alignbit_b32 v9, v7, v43, 16 +; SI-NEXT: v_alignbit_b32 v5, v6, v60, 16 +; SI-NEXT: v_alignbit_b32 v2, v1, v34, 16 +; SI-NEXT: v_readfirstlane_b32 s8, v23 +; SI-NEXT: v_readfirstlane_b32 s9, v24 +; SI-NEXT: v_readfirstlane_b32 s14, v20 +; SI-NEXT: v_readfirstlane_b32 s15, v21 +; SI-NEXT: v_readfirstlane_b32 s20, v17 +; SI-NEXT: v_readfirstlane_b32 s21, v18 +; SI-NEXT: v_readfirstlane_b32 s26, v14 +; SI-NEXT: v_readfirstlane_b32 s27, v15 +; SI-NEXT: v_readfirstlane_b32 s42, v11 +; SI-NEXT: v_readfirstlane_b32 s43, v12 +; SI-NEXT: v_readfirstlane_b32 s56, v8 +; SI-NEXT: v_readfirstlane_b32 s57, v9 +; SI-NEXT: v_readfirstlane_b32 s62, v4 +; SI-NEXT: v_readfirstlane_b32 s63, v5 +; SI-NEXT: v_readfirstlane_b32 s76, v3 +; SI-NEXT: v_readfirstlane_b32 s77, v2 +; SI-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[8:9], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[18:19], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[22:23], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[20:21], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[24:25], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[26:27], 8 +; SI-NEXT: s_lshr_b64 s[26:27], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[40:41], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[42:43], s[56:57], 24 +; SI-NEXT: s_lshr_b64 s[46:47], s[56:57], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[56:57], 8 +; SI-NEXT: s_lshr_b64 s[56:57], s[62:63], 24 +; SI-NEXT: s_lshr_b64 s[60:61], s[62:63], 16 +; SI-NEXT: s_lshr_b64 
s[72:73], s[62:63], 8 +; SI-NEXT: s_lshr_b64 s[62:63], s[76:77], 24 +; SI-NEXT: s_lshr_b64 s[74:75], s[76:77], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[76:77], 8 +; SI-NEXT: v_lshrrev_b32_e32 v30, 24, v26 +; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v24 +; SI-NEXT: v_lshrrev_b32_e32 v62, 24, v29 +; SI-NEXT: v_lshrrev_b32_e32 v58, 8, v21 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v35 +; SI-NEXT: v_lshrrev_b32_e32 v28, 8, v18 +; SI-NEXT: v_lshrrev_b32_e32 v63, 24, v33 +; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v15 +; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v39 +; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v12 +; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v40 +; SI-NEXT: v_lshrrev_b32_e32 v41, 8, v9 +; SI-NEXT: v_lshrrev_b32_e32 v42, 24, v57 +; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v5 +; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v32 +; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v2 +; SI-NEXT: s_cbranch_execnz .LBB109_3 +; SI-NEXT: .LBB109_2: ; %cmp.true +; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v53 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v55 +; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14 +; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 +; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; SI-NEXT: v_alignbit_b32 v14, v14, v13, 16 +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v38 +; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v36 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v16 -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v62 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v20 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v22, 24, v51 -; SI-NEXT: v_lshrrev_b32_e32 v63, 24, v42 -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v5 -; SI-NEXT: 
s_branch .LBB109_3 -; SI-NEXT: .LBB109_2: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; kill: killed $vgpr4 -; SI-NEXT: s_mov_b64 s[4:5], -1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; kill: killed $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr19 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr15 -; SI-NEXT: ; implicit-def: $vgpr16 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr13 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $vgpr7 -; SI-NEXT: ; implicit-def: $vgpr63 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; kill: killed $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; kill: killed $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; 
implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; kill: killed $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: .LBB109_3: ; %Flow -; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_mov_b32_e32 v59, v63 -; SI-NEXT: v_mov_b32_e32 v63, v22 -; SI-NEXT: s_cbranch_vccnz .LBB109_5 -; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v28 -; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v57 -; SI-NEXT: 
v_and_b32_e32 v1, 0xffff0000, v58 +; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v44 +; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v50 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v37 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v46 +; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v52 +; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v47 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v56 -; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v29 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v44 -; SI-NEXT: v_alignbit_b32 v5, v4, v2, 16 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v45 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v42 -; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v43 -; SI-NEXT: v_add_f32_e32 v42, 0x40c00000, v6 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v42 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v54 -; SI-NEXT: v_alignbit_b32 v8, v7, v3, 16 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v40 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v51 -; SI-NEXT: v_alignbit_b32 v3, v6, v3, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v53 -; SI-NEXT: v_add_f32_e32 v51, 0x40c00000, v9 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v51 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v38 -; SI-NEXT: v_alignbit_b32 v14, v12, v6, 16 -; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v48 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 
-; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v36 -; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v34 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v39 -; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15 -; SI-NEXT: v_alignbit_b32 v6, v9, v6, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v37 -; SI-NEXT: v_add_f32_e32 v34, 0x40c00000, v10 -; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13 -; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 -; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v34 -; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v52 -; SI-NEXT: v_alignbit_b32 v15, v15, v13, 16 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v35 -; SI-NEXT: v_alignbit_b32 v20, v18, v9, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v55 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v61 +; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7 +; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8 ; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10 -; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v13 -; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v31 -; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v13 -; SI-NEXT: v_alignbit_b32 v10, v10, v9, 16 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v23 -; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v26 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v62 -; SI-NEXT: v_alignbit_b32 v16, v13, v16, 16 -; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v9 -; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v23 -; SI-NEXT: v_alignbit_b32 v11, v9, v11, 16 -; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v11 -; SI-NEXT: v_lshrrev_b32_e32 v33, 24, v26 -; SI-NEXT: v_lshrrev_b32_e32 v26, 24, v23 -; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_alignbit_b32 v3, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v34 +; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v25 +; SI-NEXT: v_add_f32_e32 v5, 
0x40c00000, v5 +; SI-NEXT: v_alignbit_b32 v8, v8, v7, 16 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v43 +; SI-NEXT: v_alignbit_b32 v11, v11, v10, 16 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v49 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v32 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v57 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v7 +; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v40 +; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v10 +; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v39 +; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v1 +; SI-NEXT: v_alignbit_b32 v4, v5, v4, 16 +; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v60 +; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v6 +; SI-NEXT: v_add_f32_e32 v34, 0x40c00000, v7 +; SI-NEXT: v_add_f32_e32 v36, 0x40c00000, v10 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v32 +; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v25 +; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v34 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v36 +; SI-NEXT: v_alignbit_b32 v2, v1, v2, 16 +; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16 +; SI-NEXT: v_alignbit_b32 v9, v7, v9, 16 +; SI-NEXT: v_alignbit_b32 v12, v10, v12, 16 +; SI-NEXT: v_readfirstlane_b32 s76, v3 +; SI-NEXT: v_readfirstlane_b32 s77, v2 +; SI-NEXT: v_readfirstlane_b32 s62, v4 +; SI-NEXT: v_readfirstlane_b32 s63, v5 +; SI-NEXT: v_readfirstlane_b32 s56, v8 +; SI-NEXT: v_readfirstlane_b32 s57, v9 +; SI-NEXT: v_readfirstlane_b32 s42, v11 +; SI-NEXT: v_readfirstlane_b32 s43, v12 +; SI-NEXT: v_readfirstlane_b32 s26, v14 +; SI-NEXT: s_lshr_b64 s[40:41], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[42:43], 8 +; SI-NEXT: s_lshr_b64 s[46:47], s[56:57], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[56:57], 8 +; SI-NEXT: s_lshr_b64 s[60:61], s[62:63], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[62:63], 8 +; SI-NEXT: s_lshr_b64 s[74:75], s[76:77], 16 +; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v12 +; SI-NEXT: 
v_lshrrev_b32_e32 v41, 8, v9 +; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v5 +; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v2 +; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v36 +; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v34 +; SI-NEXT: v_lshrrev_b32_e32 v42, 24, v25 +; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v32 +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(4) +; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17 +; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v17 +; SI-NEXT: v_alignbit_b32 v17, v17, v16, 16 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: v_readfirstlane_b32 s20, v17 +; SI-NEXT: s_waitcnt vmcnt(4) ; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 -; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25 -; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v25 -; SI-NEXT: v_alignbit_b32 v21, v19, v17, 16 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload -; SI-NEXT: v_alignbit_b32 v27, v25, v24, 16 -; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload -; SI-NEXT: v_lshrrev_b32_e32 v63, 24, v51 -; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v42 -; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v5 -; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_and_b32_e32 
v17, 0xffff0000, v17 -; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v17 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 -; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v24 -; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v46 -; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v24 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v28 -; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v61 -; SI-NEXT: v_alignbit_b32 v30, v24, v25, 16 -; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v17 -; SI-NEXT: v_alignbit_b32 v25, v30, v27, 24 -; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v22 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v30, v27, 16 -; SI-NEXT: v_alignbit_b32 v19, v17, v19, 16 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v30, v27, 8 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v19, v21, 24 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v19, v21, 16 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v19, v21, 8 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v16, v15, 24 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v16, v15, 16 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v16, v15, 8 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill -; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v11, v10, 24 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v11, v10, 16 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v11, v10, 8 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v20, v6, 24 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v20, v6, 16 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v20, v6, 8 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v14, v3, 24 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v14, v3, 16 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v14, v3, 8 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v8, v2, 24 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v8, v2, 16 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v8, v2, 8 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v5, v1, 24 
-; SI-NEXT: v_lshrrev_b32_e32 v49, 24, v22 -; SI-NEXT: v_lshrrev_b32_e32 v22, 24, v34 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v5, v1, 16 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v22, 24, v29 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_alignbit_b32 v25, v5, v1, 8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v22, 8, v14 -; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill -; SI-NEXT: v_lshrrev_b32_e32 v50, 8, v30 -; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v19 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v16 -; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v28 -; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v20 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v22, 8, v8 -; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill -; SI-NEXT: .LBB109_5: ; %end -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v27 -; SI-NEXT: v_and_b32_e32 v21, 0xff, v21 -; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_and_b32_e32 v15, 0xff, v15 -; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: 
v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 -; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v23 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 +; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20 +; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23 +; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19 +; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20 +; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23 +; SI-NEXT: v_alignbit_b32 v20, v20, v19, 16 +; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: v_alignbit_b32 v23, v23, v22, 16 +; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v13 +; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33 +; SI-NEXT: v_add_f32_e32 v18, 0x40c00000, v16 +; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v35 +; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v13 +; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v16 +; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v33 +; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v31 +; SI-NEXT: v_alignbit_b32 v15, v13, v15, 16 +; SI-NEXT: v_alignbit_b32 v18, v16, v18, 16 +; SI-NEXT: v_readfirstlane_b32 s27, v15 +; SI-NEXT: v_readfirstlane_b32 s21, v18 +; SI-NEXT: v_readfirstlane_b32 s14, v20 +; SI-NEXT: v_readfirstlane_b32 s8, v23 +; SI-NEXT: s_lshr_b64 s[18:19], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[22:23], s[20:21], 8 +; SI-NEXT: s_lshr_b64 s[24:25], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[28:29], s[26:27], 8 +; SI-NEXT: v_lshrrev_b32_e32 v28, 8, v18 +; 
SI-NEXT: v_lshrrev_b32_e32 v59, 8, v15 +; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v31 +; SI-NEXT: v_lshrrev_b32_e32 v63, 24, v33 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; SI-NEXT: v_add_f32_e32 v21, 0x40c00000, v19 ; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v29 +; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v22 +; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v26 +; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v19 +; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v22 +; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v26 +; SI-NEXT: v_alignbit_b32 v21, v19, v21, 16 +; SI-NEXT: v_alignbit_b32 v24, v22, v24, 16 +; SI-NEXT: v_readfirstlane_b32 s15, v21 +; SI-NEXT: v_readfirstlane_b32 s9, v24 +; SI-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[8:9], 8 +; SI-NEXT: s_lshr_b64 s[8:9], s[14:15], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[16:17], s[14:15], 8 +; SI-NEXT: s_lshr_b64 s[14:15], s[20:21], 24 +; SI-NEXT: s_lshr_b64 s[20:21], s[26:27], 24 +; SI-NEXT: s_lshr_b64 s[26:27], s[42:43], 24 +; SI-NEXT: s_lshr_b64 s[42:43], s[56:57], 24 +; SI-NEXT: s_lshr_b64 s[56:57], s[62:63], 24 +; SI-NEXT: s_lshr_b64 s[62:63], s[76:77], 24 +; SI-NEXT: s_lshr_b64 s[76:77], s[76:77], 8 +; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v24 +; SI-NEXT: v_lshrrev_b32_e32 v58, 8, v21 +; SI-NEXT: v_lshrrev_b32_e32 v30, 24, v26 +; SI-NEXT: v_lshrrev_b32_e32 v62, 24, v29 +; SI-NEXT: .LBB109_3: ; %end ; SI-NEXT: v_and_b32_e32 v23, 0xff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_or_b32_e32 v23, v27, v23 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s5, s10, 8 +; SI-NEXT: v_or_b32_e32 v23, s5, v23 +; SI-NEXT: s_and_b32 s5, s6, 0xff +; SI-NEXT: s_lshl_b32 s5, s5, 
16 +; SI-NEXT: s_lshl_b32 s4, s4, 24 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: v_or_b32_e32 v23, s4, v23 +; SI-NEXT: buffer_store_dword v23, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v22, 0xff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v50 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 ; SI-NEXT: v_and_b32_e32 v23, 0xff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v41 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_or_b32_e32 v23, v24, v23 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v22 -; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v23 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v27 ; SI-NEXT: v_and_b32_e32 v22, 0xff, v22 +; SI-NEXT: v_and_b32_e32 v20, 0xff, v20 +; SI-NEXT: s_lshl_b32 s4, s16, 8 +; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v30 +; SI-NEXT: v_or_b32_e32 v20, s4, v20 +; SI-NEXT: s_and_b32 s4, s12, 0xff +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v22, v24, v22 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s8, 24 ; SI-NEXT: v_or_b32_e32 v22, v23, v22 -; SI-NEXT: v_or_b32_e32 v21, v21, v22 +; SI-NEXT: v_add_i32_e32 v23, vcc, 4, v0 +; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v22, v23, 
s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v20, s4, v20 +; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_add_i32_e32 v22, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v32 -; SI-NEXT: v_or_b32_e32 v19, v19, v21 -; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v49 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_or_b32_e32 v17, v21, v17 -; SI-NEXT: v_or_b32_e32 v17, v19, v17 -; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0 +; SI-NEXT: v_and_b32_e32 v20, 0xff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v58 +; SI-NEXT: v_and_b32_e32 v19, 0xff, v19 +; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 +; SI-NEXT: s_lshl_b32 s4, s22, 8 +; SI-NEXT: v_or_b32_e32 v20, v20, v21 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v62 +; SI-NEXT: v_or_b32_e32 v17, s4, v17 +; SI-NEXT: s_and_b32 s4, s18, 0xff +; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: v_or_b32_e32 v19, v21, v19 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s14, 24 +; SI-NEXT: v_or_b32_e32 v19, v20, v19 +; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v17, s4, v17 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_i32_e32 v19, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v17 -; SI-NEXT: v_or_b32_e32 v15, v15, v17 -; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v19, 
24, v19 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v17, 0xff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v17, v19, v17 -; SI-NEXT: v_or_b32_e32 v15, v15, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v17, 0xff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 8, v28 +; SI-NEXT: v_and_b32_e32 v16, 0xff, v16 +; SI-NEXT: v_and_b32_e32 v14, 0xff, v14 +; SI-NEXT: s_lshl_b32 s4, s28, 8 +; SI-NEXT: v_or_b32_e32 v17, v17, v18 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v31 +; SI-NEXT: v_or_b32_e32 v14, s4, v14 +; SI-NEXT: s_and_b32 s4, s24, 0xff +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: v_or_b32_e32 v16, v18, v16 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s20, 24 +; SI-NEXT: v_or_b32_e32 v16, v17, v16 +; SI-NEXT: v_add_i32_e32 v17, vcc, 20, v0 +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v14, s4, v14 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 8, v25 -; SI-NEXT: v_or_b32_e32 v15, v15, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v33 -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 -; SI-NEXT: v_or_b32_e32 v13, v16, v13 -; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_add_i32_e32 v15, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v16, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v13 -; SI-NEXT: v_or_b32_e32 v10, v10, v13 -; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:72 ; 
4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v15 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v14, 0xff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v59 ; SI-NEXT: v_and_b32_e32 v13, 0xff, v13 +; SI-NEXT: v_and_b32_e32 v11, 0xff, v11 +; SI-NEXT: s_lshl_b32 s4, s44, 8 +; SI-NEXT: v_or_b32_e32 v14, v14, v15 ; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v63 +; SI-NEXT: v_or_b32_e32 v11, s4, v11 +; SI-NEXT: s_and_b32 s4, s40, 0xff +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 ; SI-NEXT: v_or_b32_e32 v13, v15, v13 -; SI-NEXT: v_or_b32_e32 v10, v10, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v10, v13, s[0:3], 0 offen +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s26, 24 +; SI-NEXT: v_or_b32_e32 v13, v14, v13 +; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v11, s4, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xff, v11 -; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v26 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_or_b32_e32 v9, v11, v9 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_add_i32_e32 v10, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v13, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 
v9, 8, v9 -; SI-NEXT: v_or_b32_e32 v6, v6, v9 -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v9, 0xff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_or_b32_e32 v6, v6, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v6, v9, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: v_and_b32_e32 v11, 0xff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v47 +; SI-NEXT: v_and_b32_e32 v10, 0xff, v10 +; SI-NEXT: v_and_b32_e32 v8, 0xff, v8 +; SI-NEXT: s_lshl_b32 s4, s58, 8 +; SI-NEXT: v_or_b32_e32 v11, v11, v12 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v56 +; SI-NEXT: v_or_b32_e32 v8, s4, v8 +; SI-NEXT: s_and_b32 s4, s46, 0xff +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: v_or_b32_e32 v10, v12, v10 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s42, 24 +; SI-NEXT: v_or_b32_e32 v10, v11, v10 +; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 +; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v8, s4, v8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v29 -; SI-NEXT: v_or_b32_e32 v6, v6, v9 -; SI-NEXT: v_and_b32_e32 v9, 0xff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_or_b32_e32 v6, v6, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v6, v9, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v8, 
v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v63 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: v_and_b32_e32 v6, 0xff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_or_b32_e32 v6, v9, v6 -; SI-NEXT: v_or_b32_e32 v3, v3, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v8, 0xff, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v41 +; SI-NEXT: v_and_b32_e32 v7, 0xff, v7 +; SI-NEXT: v_and_b32_e32 v4, 0xff, v4 +; SI-NEXT: s_lshl_b32 s4, s72, 8 +; SI-NEXT: v_or_b32_e32 v8, v8, v9 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v45 +; SI-NEXT: v_or_b32_e32 v4, s4, v4 +; SI-NEXT: s_and_b32 s4, s60, 0xff +; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_or_b32_e32 v7, v9, v7 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s56, 24 +; SI-NEXT: v_or_b32_e32 v7, v8, v7 +; SI-NEXT: v_add_i32_e32 v8, 
vcc, 44, v0 +; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v4, s4, v4 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v6, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v59 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_or_b32_e32 v3, v6, v3 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 -; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v4, 0xff, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v54 +; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_and_b32_e32 v5, 0xff, v6 +; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 +; SI-NEXT: s_lshl_b32 s4, s76, 8 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v42 +; SI-NEXT: v_or_b32_e32 v3, s4, v3 
+; SI-NEXT: s_and_b32 s4, s74, 0xff +; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; SI-NEXT: v_or_b32_e32 v5, v6, v5 +; SI-NEXT: s_lshl_b32 s4, s4, 16 +; SI-NEXT: s_lshl_b32 s5, s62, 24 +; SI-NEXT: v_or_b32_e32 v4, v4, v5 +; SI-NEXT: v_add_i32_e32 v5, vcc, 52, v0 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen +; SI-NEXT: v_or_b32_e32 v3, s4, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_add_i32_e32 v4, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v2, 0xff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v60 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 -; SI-NEXT: v_and_b32_e32 v2, 0xff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v48 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v1 +; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v51 +; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_or_b32_e32 v1, v2, v1 ; SI-NEXT: v_add_i32_e32 v0, 
vcc, 60, v0 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload @@ -86462,6 +86745,72 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a, ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: .LBB109_4: +; SI-NEXT: ; implicit-def: $vgpr23 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $vgpr27 +; SI-NEXT: ; implicit-def: $vgpr22 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr20 +; SI-NEXT: ; implicit-def: $sgpr16 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $vgpr21 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr19 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr17 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr18 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $vgpr18 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr16 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr14 +; SI-NEXT: ; implicit-def: $sgpr28 +; SI-NEXT: ; implicit-def: $sgpr24 +; SI-NEXT: ; implicit-def: $sgpr20 +; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr26 +; SI-NEXT: ; implicit-def: $vgpr12 +; SI-NEXT: ; 
implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr10 +; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr8 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr6 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $vgpr2 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: s_branch .LBB109_2 ; ; VI-LABEL: bitcast_v32bf16_to_v64i8_scalar: ; VI: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll index e66762f1e02c2..a1c0a87b65e02 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll @@ -2760,216 +2760,214 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5 -; SI-NEXT: v_readfirstlane_b32 s9, v1 -; SI-NEXT: v_readfirstlane_b32 s8, v2 -; SI-NEXT: v_readfirstlane_b32 s7, v3 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v4 +; SI-NEXT: v_readfirstlane_b32 s6, v1 +; SI-NEXT: v_readfirstlane_b32 s7, v2 +; SI-NEXT: v_readfirstlane_b32 s4, v3 +; SI-NEXT: s_and_b64 s[8:9], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v4 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: 
v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s28 -; SI-NEXT: v_mov_b32_e32 v4, s26 -; SI-NEXT: v_mov_b32_e32 v5, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v7, s20 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v9, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s29, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s27, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s25, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s21, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s19, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s17, v9, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 16 -; SI-NEXT: s_lshr_b32 s11, s8, 16 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s27, 16 -; SI-NEXT: s_lshr_b32 s14, s25, 16 -; SI-NEXT: s_lshr_b32 s15, s23, 16 -; SI-NEXT: s_lshr_b32 s40, s21, 16 -; SI-NEXT: s_lshr_b32 s41, s19, 16 -; SI-NEXT: s_lshr_b32 s42, s17, 16 +; SI-NEXT: s_lshr_b32 s60, s5, 16 +; SI-NEXT: s_lshr_b32 s61, s7, 16 +; SI-NEXT: s_lshr_b32 s62, s29, 16 +; SI-NEXT: s_lshr_b32 s63, s27, 16 +; SI-NEXT: s_lshr_b32 s72, s25, 16 +; SI-NEXT: s_lshr_b32 s73, s23, 16 +; SI-NEXT: s_lshr_b32 s74, s21, 16 +; SI-NEXT: s_lshr_b32 s75, s19, 16 +; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 
+; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s8, s8, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s28 -; SI-NEXT: v_mov_b32_e32 v4, s26 -; SI-NEXT: v_mov_b32_e32 v5, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v7, s20 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v9, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s29, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s27, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s25, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s21, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s19, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s17, v9, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 16 -; SI-NEXT: s_lshr_b32 s11, s8, 16 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s27, 16 -; SI-NEXT: s_lshr_b32 s14, s25, 16 -; SI-NEXT: s_lshr_b32 s15, s23, 16 -; SI-NEXT: s_lshr_b32 s40, s21, 16 -; SI-NEXT: s_lshr_b32 s41, s19, 16 -; SI-NEXT: s_lshr_b32 s42, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[22:23], 16 +; SI-NEXT: s_lshr_b64 
s[44:45], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s60, s5, 16 +; SI-NEXT: s_lshr_b32 s61, s7, 16 +; SI-NEXT: s_lshr_b32 s62, s29, 16 +; SI-NEXT: s_lshr_b32 s63, s27, 16 +; SI-NEXT: s_lshr_b32 s72, s25, 16 +; SI-NEXT: s_lshr_b32 s73, s23, 16 +; SI-NEXT: s_lshr_b32 s74, s21, 16 +; SI-NEXT: s_lshr_b32 s75, s19, 16 +; SI-NEXT: s_lshr_b32 s76, s17, 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: s_lshl_b32 s9, s56, 16 +; SI-NEXT: s_and_b32 s11, s16, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_mov_b32_e32 v1, s9 +; SI-NEXT: s_and_b32 s9, s17, 0xffff +; SI-NEXT: s_lshl_b32 s11, s76, 16 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s18, 0xffff +; SI-NEXT: s_lshl_b32 s11, s46, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: 
v_add_i32_e32 v8, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s19, 0xffff +; SI-NEXT: s_lshl_b32 s11, s75, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s13, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: 
v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s20, 0xffff +; SI-NEXT: s_lshl_b32 s11, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s12, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s21, 0xffff +; SI-NEXT: s_lshl_b32 s11, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s11, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s22, 0xffff +; SI-NEXT: s_lshl_b32 s11, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s10, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 +; SI-NEXT: v_mov_b32_e32 
v2, s9 +; SI-NEXT: s_and_b32 s9, s23, 0xffff +; SI-NEXT: s_lshl_b32 s11, s73, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s24, 0xffff +; SI-NEXT: s_lshl_b32 s11, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s25, 0xffff +; SI-NEXT: s_lshl_b32 s11, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s26, 0xffff +; SI-NEXT: s_lshl_b32 s11, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s27, 0xffff +; SI-NEXT: s_lshl_b32 s11, s63, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s28, 0xffff +; SI-NEXT: s_lshl_b32 s11, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s29, 0xffff +; SI-NEXT: s_lshl_b32 s11, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s9, s10, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 
52, v0 +; SI-NEXT: s_or_b32 s6, s6, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s61, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s8, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x44, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr73 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr63 ; SI-NEXT: ; 
implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr62 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr60 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v18i32_to_v36i16_scalar: @@ -9746,207 +9744,207 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5 -; SI-NEXT: v_mov_b32_e32 v19, s16 -; SI-NEXT: v_mov_b32_e32 v18, s17 +; SI-NEXT: v_mov_b32_e32 v18, s16 +; SI-NEXT: v_mov_b32_e32 v19, s17 ; SI-NEXT: v_mov_b32_e32 v16, s18 -; SI-NEXT: v_mov_b32_e32 v15, s19 +; SI-NEXT: v_mov_b32_e32 v17, s19 ; SI-NEXT: v_mov_b32_e32 v14, s20 -; SI-NEXT: v_mov_b32_e32 v13, s21 +; SI-NEXT: v_mov_b32_e32 v15, s21 ; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v11, s23 +; SI-NEXT: v_mov_b32_e32 v13, s23 ; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v9, s25 +; SI-NEXT: v_mov_b32_e32 v11, s25 ; SI-NEXT: v_mov_b32_e32 v8, s26 -; SI-NEXT: v_mov_b32_e32 v7, s27 +; SI-NEXT: v_mov_b32_e32 v9, s27 ; SI-NEXT: v_mov_b32_e32 v6, s28 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v5, s29 +; SI-NEXT: v_mov_b32_e32 v7, s29 ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v17, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v20, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v21, v5, v6, 16 -; SI-NEXT: v_alignbit_b32 v22, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v25, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v27, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v29, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v31, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v33, v18, v19, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v5 -; SI-NEXT: 
v_lshrrev_b32_e32 v28, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v11 +; SI-NEXT: v_lshr_b64 v[20:21], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[22:23], v[6:7], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[10:11], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[14:15], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[16:17], 16 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v11 ; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v19 +; SI-NEXT: v_lshr_b64 v[28:29], v[18:19], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 ; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 ; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_alignbit_b32 v17, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v20, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v21, v5, v6, 16 -; SI-NEXT: v_alignbit_b32 v22, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v25, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v27, v11, v12, 16 -; SI-NEXT: 
v_alignbit_b32 v29, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v31, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v33, v18, v19, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v11 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[20:21], v[3:4], 16 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_lshr_b64 v[21:22], v[1:2], 16 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_lshr_b64 v[22:23], v[6:7], 16 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_lshr_b64 v[23:24], v[8:9], 16 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_lshr_b64 v[24:25], v[10:11], 16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_lshr_b64 v[25:26], v[12:13], 16 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_lshr_b64 v[26:27], v[14:15], 16 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_lshr_b64 v[27:28], v[16:17], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[18:19], 16 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v11 ; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v13 ; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v19 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v33 -; SI-NEXT: v_or_b32_e32 v19, v19, v33 -; SI-NEXT: 
buffer_store_dword v19, v0, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 ; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_or_b32_e32 v18, v18, v28 +; SI-NEXT: buffer_store_dword v18, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 +; SI-NEXT: v_and_b32_e32 v18, 0xffff, v19 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v37 ; SI-NEXT: v_or_b32_e32 v18, v18, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v27 +; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: v_or_b32_e32 v16, v16, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v16, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v35 -; SI-NEXT: v_or_b32_e32 v15, v15, v16 -; SI-NEXT: v_add_i32_e32 v16, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v16, 0xffff, v17 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36 +; SI-NEXT: v_or_b32_e32 v16, v16, v17 +; SI-NEXT: v_add_i32_e32 v17, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v26 ; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_or_b32_e32 v14, v14, v16 +; SI-NEXT: v_add_i32_e32 v16, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v29 +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v35 ; SI-NEXT: v_or_b32_e32 v14, v14, v15 -; SI-NEXT: v_add_i32_e32 v15, vcc, 16, v0 +; SI-NEXT: v_add_i32_e32 v15, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v34 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v25 +; SI-NEXT: v_or_b32_e32 v12, v12, v14 +; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v12, v14, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v12, 0xffff, v13 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v34 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 24, v0 +; SI-NEXT: v_add_i32_e32 v13, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v32 -; SI-NEXT: v_or_b32_e32 v11, v11, v12 -; SI-NEXT: v_add_i32_e32 v12, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v24 +; SI-NEXT: v_or_b32_e32 v10, v10, v12 +; SI-NEXT: v_add_i32_e32 v12, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v10, 0xffff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v33 ; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v30 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 
16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v23 +; SI-NEXT: v_or_b32_e32 v8, v8, v10 +; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v8, v10, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v8, 0xffff, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v32 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 40, v0 +; SI-NEXT: v_add_i32_e32 v9, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v28 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v22 +; SI-NEXT: v_or_b32_e32 v6, v6, v8 +; SI-NEXT: v_add_i32_e32 v8, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v6, v8, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v6, 0xffff, v7 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v31 ; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 +; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v26 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v20 -; SI-NEXT: v_or_b32_e32 v1, v1, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v21 +; SI-NEXT: v_or_b32_e32 v1, v1, v6 +; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) 
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v24 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v20 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr27 +; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr17 -; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v18f32_to_v36i16_scalar: @@ 
-15972,216 +15970,214 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5 -; SI-NEXT: v_readfirstlane_b32 s9, v1 -; SI-NEXT: v_readfirstlane_b32 s8, v2 -; SI-NEXT: v_readfirstlane_b32 s7, v3 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v4 +; SI-NEXT: v_readfirstlane_b32 s6, v1 +; SI-NEXT: v_readfirstlane_b32 s7, v2 +; SI-NEXT: v_readfirstlane_b32 s4, v3 +; SI-NEXT: s_and_b64 s[8:9], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v4 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s28 -; SI-NEXT: v_mov_b32_e32 v4, s26 -; SI-NEXT: v_mov_b32_e32 v5, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v7, s20 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v9, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s29, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s27, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s25, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s21, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s19, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s17, v9, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 16 -; SI-NEXT: s_lshr_b32 s11, s8, 16 -; SI-NEXT: s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s27, 16 -; SI-NEXT: s_lshr_b32 s14, s25, 16 -; SI-NEXT: s_lshr_b32 s15, s23, 16 -; SI-NEXT: s_lshr_b32 s40, s21, 16 -; SI-NEXT: s_lshr_b32 s41, s19, 16 -; SI-NEXT: s_lshr_b32 s42, s17, 16 +; SI-NEXT: s_lshr_b32 s60, s5, 16 +; SI-NEXT: s_lshr_b32 s61, s7, 16 +; SI-NEXT: s_lshr_b32 s62, s29, 16 +; SI-NEXT: s_lshr_b32 s63, s27, 16 +; SI-NEXT: s_lshr_b32 s72, s25, 16 +; SI-NEXT: s_lshr_b32 s73, s23, 16 +; SI-NEXT: s_lshr_b32 s74, s21, 16 +; SI-NEXT: s_lshr_b32 s75, s19, 16 +; SI-NEXT: s_lshr_b32 s76, s17, 
16 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s28 -; SI-NEXT: v_mov_b32_e32 v4, s26 -; SI-NEXT: v_mov_b32_e32 v5, s24 -; SI-NEXT: v_mov_b32_e32 v6, s22 -; SI-NEXT: v_mov_b32_e32 v7, s20 -; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v9, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s29, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s27, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s25, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s23, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s21, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s19, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s17, v9, 16 -; SI-NEXT: s_lshr_b32 s10, s6, 16 -; SI-NEXT: s_lshr_b32 s11, s8, 16 -; SI-NEXT: 
s_lshr_b32 s12, s29, 16 -; SI-NEXT: s_lshr_b32 s13, s27, 16 -; SI-NEXT: s_lshr_b32 s14, s25, 16 -; SI-NEXT: s_lshr_b32 s15, s23, 16 -; SI-NEXT: s_lshr_b32 s40, s21, 16 -; SI-NEXT: s_lshr_b32 s41, s19, 16 -; SI-NEXT: s_lshr_b32 s42, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s60, s5, 16 +; SI-NEXT: s_lshr_b32 s61, s7, 16 +; SI-NEXT: s_lshr_b32 s62, s29, 16 +; SI-NEXT: s_lshr_b32 s63, s27, 16 +; SI-NEXT: s_lshr_b32 s72, s25, 16 +; SI-NEXT: s_lshr_b32 s73, s23, 16 +; SI-NEXT: s_lshr_b32 s74, s21, 16 +; SI-NEXT: s_lshr_b32 s75, s19, 16 +; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: buffer_store_dword v9, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: 
buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: s_lshl_b32 s9, s56, 16 +; SI-NEXT: s_and_b32 s11, s16, 0xffff +; SI-NEXT: s_or_b32 s9, s11, s9 +; SI-NEXT: v_mov_b32_e32 v1, s9 +; SI-NEXT: s_and_b32 s9, s17, 0xffff +; SI-NEXT: s_lshl_b32 s11, s76, 16 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s18, 0xffff +; SI-NEXT: s_lshl_b32 s11, s46, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v7, v6, 
s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s19, 0xffff +; SI-NEXT: s_lshl_b32 s11, s75, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s13, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s20, 0xffff +; SI-NEXT: s_lshl_b32 s11, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s12, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s21, 0xffff +; SI-NEXT: s_lshl_b32 s11, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 -; 
SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s11, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s22, 0xffff +; SI-NEXT: s_lshl_b32 s11, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s10, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s23, 0xffff +; SI-NEXT: s_lshl_b32 s11, s73, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s24, 0xffff +; SI-NEXT: s_lshl_b32 s11, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s25, 0xffff +; SI-NEXT: s_lshl_b32 s11, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s26, 0xffff +; SI-NEXT: s_lshl_b32 s11, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; 
SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s27, 0xffff +; SI-NEXT: s_lshl_b32 s11, s63, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s28, 0xffff +; SI-NEXT: s_lshl_b32 s11, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s9, s29, 0xffff +; SI-NEXT: s_lshl_b32 s11, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s9, s9, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s9 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s9, s10, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s6, s6, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s61, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s8, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x44, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; 
SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr75 +; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr73 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr63 ; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr62 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr61 +; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr60 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v9i64_to_v36i16_scalar: @@ -21460,97 +21456,97 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i ; SI-NEXT: v_mov_b32_e32 v7, s29 ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v5, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v20, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v21, v7, v6, 16 -; SI-NEXT: v_alignbit_b32 v22, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v24, v11, v10, 16 -; SI-NEXT: v_alignbit_b32 v26, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v29, v15, v14, 16 -; SI-NEXT: v_alignbit_b32 v31, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v34, v19, v18, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 
v25, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v19 +; SI-NEXT: v_lshr_b64 v[20:21], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[22:23], v[6:7], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[10:11], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[14:15], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[16:17], 16 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v19 +; SI-NEXT: v_lshr_b64 v[28:29], v[18:19], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0 -; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0 -; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0 -; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0 -; SI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0 -; SI-NEXT: v_add_f64 v[8:9], v[8:9], 1.0 -; SI-NEXT: v_add_f64 v[6:7], v[6:7], 1.0 ; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 ; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_alignbit_b32 v5, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v20, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v21, v7, v6, 16 -; SI-NEXT: v_alignbit_b32 v22, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v24, v11, v10, 16 -; SI-NEXT: v_alignbit_b32 v26, v13, v12, 16 -; SI-NEXT: v_alignbit_b32 v29, v15, v14, 16 -; SI-NEXT: v_alignbit_b32 v31, v17, v16, 16 -; SI-NEXT: v_alignbit_b32 v34, v19, v18, 16 -; SI-NEXT: v_lshrrev_b32_e32 v23, 16, 
v4 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v19 +; SI-NEXT: v_add_f64 v[6:7], v[6:7], 1.0 +; SI-NEXT: v_lshr_b64 v[20:21], v[3:4], 16 +; SI-NEXT: v_add_f64 v[8:9], v[8:9], 1.0 +; SI-NEXT: v_lshr_b64 v[21:22], v[1:2], 16 +; SI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0 +; SI-NEXT: v_lshr_b64 v[22:23], v[6:7], 16 +; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0 +; SI-NEXT: v_lshr_b64 v[23:24], v[8:9], 16 +; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0 +; SI-NEXT: v_lshr_b64 v[24:25], v[10:11], 16 +; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[25:26], v[12:13], 16 +; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0 +; SI-NEXT: v_lshr_b64 v[26:27], v[14:15], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[16:17], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[18:19], 16 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v7 +; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v13 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v15 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v19 ; SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 ; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_or_b32_e32 v18, v18, v34 +; SI-NEXT: v_or_b32_e32 v18, v18, v28 ; SI-NEXT: buffer_store_dword v18, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v18, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v37 ; SI-NEXT: v_or_b32_e32 v18, v18, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 4, v0 ; SI-NEXT: 
buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v27 +; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: v_or_b32_e32 v16, v16, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v16, v18, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v16, 0xffff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36 ; SI-NEXT: v_or_b32_e32 v16, v16, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v26 +; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 ; SI-NEXT: v_or_b32_e32 v14, v14, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v14, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v35 ; SI-NEXT: v_or_b32_e32 v14, v14, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v25 ; SI-NEXT: v_or_b32_e32 v12, v12, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v12, v14, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v12, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v34 ; SI-NEXT: v_or_b32_e32 v12, v12, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen @@ -21562,79 +21558,79 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> 
inreg %a, i ; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v10, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v30 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v33 ; SI-NEXT: v_or_b32_e32 v10, v10, v11 ; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v23 ; SI-NEXT: v_or_b32_e32 v8, v8, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v8, v10, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v8, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v28 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v32 ; SI-NEXT: v_or_b32_e32 v8, v8, v9 ; SI-NEXT: v_add_i32_e32 v9, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v22 ; SI-NEXT: v_or_b32_e32 v6, v6, v8 ; SI-NEXT: v_add_i32_e32 v8, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v6, v8, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v6, 0xffff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v31 ; SI-NEXT: v_or_b32_e32 v6, v6, v7 ; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v20 +; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v21 ; SI-NEXT: v_or_b32_e32 v1, v1, v6 ; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: 
v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v20 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr24 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr26 +; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr24 +; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr5 -; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v9f64_to_v36i16_scalar: @@ -28520,302 +28516,321 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i ; SI-LABEL: bitcast_v36f16_to_v36i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v12 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v52, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v51, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v22 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v45, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v44, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v43, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v41, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v42, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v55, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v40, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v12 ; 
SI-NEXT: v_cvt_f16_f32_e32 v6, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v36, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v35, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v16, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v17, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v18, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v34, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v14, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v15, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v33, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v34, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v21, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v32, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v18, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v30, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v28, s29 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v23 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true +; SI-NEXT: v_cvt_f32_f16_e32 v5, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: 
v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v44 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_or_b32_e32 v21, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v18 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v32 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v5 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: 
v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_or_b32_e32 v1, v1, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_or_b32_e32 v3, v3, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_or_b32_e32 v6, v6, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_or_b32_e32 v23, v23, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v18, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v55 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v16 +; SI-NEXT: 
v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v30 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v16, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v52 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v14 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v28 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v14, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v10 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v26 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v50 +; SI-NEXT: v_or_b32_e32 v10, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v8 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v24 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v49 +; SI-NEXT: v_or_b32_e32 v8, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v39 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v22 +; 
SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v39 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_or_b32_e32 v6, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v37 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v19 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v4 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v4, v3, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v43 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v38 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v11 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_or_b32_e32 v24, v24, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v36 +; SI-NEXT: v_or_b32_e32 v2, v2, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v45 +; SI-NEXT: v_or_b32_e32 v43, v12, v17 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v54 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, 
v22 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 +; SI-NEXT: v_or_b32_e32 v45, v11, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v40 +; SI-NEXT: v_or_b32_e32 v42, v12, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v53 +; SI-NEXT: v_or_b32_e32 v54, v19, v9 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_or_b32_e32 v40, v11, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v51 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_or_b32_e32 v27, v27, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_or_b32_e32 v13, v13, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v17 -; SI-NEXT: v_or_b32_e32 v16, v16, v37 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v20 -; SI-NEXT: v_or_b32_e32 v19, v19, v37 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36 -; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v35 -; SI-NEXT: 
v_lshlrev_b32_e32 v34, 16, v34 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v33 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32 -; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v22, v22, v36 -; SI-NEXT: v_or_b32_e32 v21, v21, v35 -; SI-NEXT: v_or_b32_e32 v18, v18, v34 -; SI-NEXT: v_or_b32_e32 v15, v15, v33 -; SI-NEXT: v_or_b32_e32 v29, v29, v32 -; SI-NEXT: v_or_b32_e32 v26, v26, v31 -; SI-NEXT: v_or_b32_e32 v11, v11, v30 -; SI-NEXT: v_or_b32_e32 v8, v8, v12 -; SI-NEXT: v_or_b32_e32 v5, v5, v9 -; SI-NEXT: v_alignbit_b32 v36, v19, v36, 16 -; SI-NEXT: v_alignbit_b32 v35, v16, v35, 16 -; SI-NEXT: v_alignbit_b32 v34, v13, v34, 16 -; SI-NEXT: v_alignbit_b32 v33, v27, v33, 16 -; SI-NEXT: v_alignbit_b32 v32, v24, v32, 16 -; SI-NEXT: v_alignbit_b32 v31, v23, v31, 16 -; SI-NEXT: v_alignbit_b32 v30, v6, v30, 16 -; SI-NEXT: v_alignbit_b32 v12, v3, v12, 16 -; SI-NEXT: v_alignbit_b32 v9, v1, v9, 16 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v53, v12, v7 +; SI-NEXT: v_or_b32_e32 v51, v11, v5 +; SI-NEXT: v_or_b32_e32 v48, v19, v3 +; SI-NEXT: v_or_b32_e32 v38, v22, v1 +; SI-NEXT: v_lshr_b64 v[34:35], v[20:21], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[22:23], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[19:20], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[11:12], v[1:2], 16 ; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v22, v22, v36 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: 
v_add_i32_e32 v20, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v35 -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v34 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v45 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v34 -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v44 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v33 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v32 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v43 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) 
-; SI-NEXT: v_and_b32_e32 v13, 0xffff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v28 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v41 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v32 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v30 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v42 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v25 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v31 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v28 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: 
v_or_b32_e32 v10, v13, v10 -; SI-NEXT: v_add_i32_e32 v13, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v10, v13, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v30 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 -; SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v26 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v12 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v50 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v9 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 64, v0 +; SI-NEXT: 
v_and_b32_e32 v1, 0xffff, v53 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v24 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v22 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v39 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v48 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v38 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen 
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll index b8091d8256457..47cb6bd3b3bb6 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll @@ -2838,240 +2838,238 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 -; SI-NEXT: v_readfirstlane_b32 s11, v1 -; SI-NEXT: v_readfirstlane_b32 s10, v2 -; SI-NEXT: v_readfirstlane_b32 s9, v3 -; SI-NEXT: v_readfirstlane_b32 s8, v4 -; SI-NEXT: v_readfirstlane_b32 s7, v5 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v6 +; SI-NEXT: v_readfirstlane_b32 s8, v1 +; SI-NEXT: v_readfirstlane_b32 s9, v2 +; SI-NEXT: v_readfirstlane_b32 s6, v3 +; SI-NEXT: v_readfirstlane_b32 s7, v4 +; SI-NEXT: v_readfirstlane_b32 s4, v5 +; SI-NEXT: s_and_b64 s[10:11], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v6 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s28 -; SI-NEXT: v_mov_b32_e32 v5, s26 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v9, s18 -; 
SI-NEXT: v_mov_b32_e32 v10, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s29, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s27, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s25, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s23, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s21, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s17, v10, 16 -; SI-NEXT: s_lshr_b32 s12, s6, 16 -; SI-NEXT: s_lshr_b32 s13, s8, 16 -; SI-NEXT: s_lshr_b32 s14, s10, 16 -; SI-NEXT: s_lshr_b32 s15, s29, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 16 -; SI-NEXT: s_lshr_b32 s41, s25, 16 -; SI-NEXT: s_lshr_b32 s42, s23, 16 -; SI-NEXT: s_lshr_b32 s43, s21, 16 -; SI-NEXT: s_lshr_b32 s44, s19, 16 -; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_lshr_b32 s72, s5, 16 +; SI-NEXT: s_lshr_b32 s73, s7, 16 +; SI-NEXT: s_lshr_b32 s74, s9, 16 +; SI-NEXT: s_lshr_b32 s75, s29, 16 +; SI-NEXT: s_lshr_b32 s76, s27, 16 +; SI-NEXT: s_lshr_b32 s77, s25, 16 +; SI-NEXT: s_lshr_b32 s78, s23, 16 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s19, 16 +; SI-NEXT: s_lshr_b32 s89, s17, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, 
s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s10, s10, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s28 -; SI-NEXT: v_mov_b32_e32 v5, s26 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v9, s18 -; SI-NEXT: v_mov_b32_e32 v10, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s29, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s27, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s25, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s23, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s21, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s17, v10, 16 -; SI-NEXT: s_lshr_b32 s12, s6, 16 -; SI-NEXT: s_lshr_b32 s13, s8, 16 -; SI-NEXT: s_lshr_b32 s14, s10, 16 -; SI-NEXT: s_lshr_b32 s15, s29, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 16 -; SI-NEXT: s_lshr_b32 s41, s25, 16 -; SI-NEXT: s_lshr_b32 s42, s23, 16 -; SI-NEXT: s_lshr_b32 s43, s21, 16 -; SI-NEXT: s_lshr_b32 s44, s19, 16 -; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], 
s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 16 +; SI-NEXT: s_lshr_b32 s72, s5, 16 +; SI-NEXT: s_lshr_b32 s73, s7, 16 +; SI-NEXT: s_lshr_b32 s74, s9, 16 +; SI-NEXT: s_lshr_b32 s75, s29, 16 +; SI-NEXT: s_lshr_b32 s76, s27, 16 +; SI-NEXT: s_lshr_b32 s77, s25, 16 +; SI-NEXT: s_lshr_b32 s78, s23, 16 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s19, 16 +; SI-NEXT: s_lshr_b32 s89, s17, 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: s_lshl_b32 s11, s60, 16 +; SI-NEXT: s_and_b32 s13, s16, 0xffff +; SI-NEXT: s_or_b32 s11, s13, s11 +; SI-NEXT: v_mov_b32_e32 v1, s11 +; SI-NEXT: s_and_b32 s11, s17, 0xffff +; SI-NEXT: s_lshl_b32 s13, s89, 16 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_lshl_b32 s11, s58, 16 +; SI-NEXT: s_and_b32 s13, s18, 0xffff +; SI-NEXT: s_or_b32 s11, s13, s11 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 
offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s19, 0xffff +; SI-NEXT: s_lshl_b32 s13, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s20, 0xffff +; SI-NEXT: s_lshl_b32 s13, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, 
v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s21, 0xffff +; SI-NEXT: s_lshl_b32 s13, s79, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s22, 0xffff +; SI-NEXT: s_lshl_b32 s13, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, 
s23, 0xffff +; SI-NEXT: s_lshl_b32 s13, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s13, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s24, 0xffff +; SI-NEXT: s_lshl_b32 s13, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s12, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s25, 0xffff +; SI-NEXT: s_lshl_b32 s13, s77, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s26, 0xffff +; SI-NEXT: s_lshl_b32 s13, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s27, 0xffff +; SI-NEXT: s_lshl_b32 s13, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) 
+; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s28, 0xffff +; SI-NEXT: s_lshl_b32 s13, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s29, 0xffff +; SI-NEXT: s_lshl_b32 s13, s75, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s11, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s8, s8, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s73, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s10, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s72, 16 +; SI-NEXT: 
v_add_i32_e32 v1, vcc, 0x48, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x4c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr77 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr76 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v20i32_to_v40i16_scalar: @@ -10571,165 +10569,165 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 -; SI-NEXT: v_mov_b32_e32 v21, s16 +; 
SI-NEXT: v_mov_b32_e32 v19, s16 ; SI-NEXT: v_mov_b32_e32 v20, s17 -; SI-NEXT: v_mov_b32_e32 v19, s18 -; SI-NEXT: v_mov_b32_e32 v17, s19 -; SI-NEXT: v_mov_b32_e32 v16, s20 -; SI-NEXT: v_mov_b32_e32 v15, s21 -; SI-NEXT: v_mov_b32_e32 v14, s22 -; SI-NEXT: v_mov_b32_e32 v13, s23 -; SI-NEXT: v_mov_b32_e32 v12, s24 -; SI-NEXT: v_mov_b32_e32 v11, s25 -; SI-NEXT: v_mov_b32_e32 v10, s26 -; SI-NEXT: v_mov_b32_e32 v9, s27 -; SI-NEXT: v_mov_b32_e32 v8, s28 +; SI-NEXT: v_mov_b32_e32 v17, s18 +; SI-NEXT: v_mov_b32_e32 v18, s19 +; SI-NEXT: v_mov_b32_e32 v15, s20 +; SI-NEXT: v_mov_b32_e32 v16, s21 +; SI-NEXT: v_mov_b32_e32 v13, s22 +; SI-NEXT: v_mov_b32_e32 v14, s23 +; SI-NEXT: v_mov_b32_e32 v11, s24 +; SI-NEXT: v_mov_b32_e32 v12, s25 +; SI-NEXT: v_mov_b32_e32 v9, s26 +; SI-NEXT: v_mov_b32_e32 v10, s27 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v7, s29 +; SI-NEXT: v_mov_b32_e32 v7, s28 +; SI-NEXT: v_mov_b32_e32 v8, s29 ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v18, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v22, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v23, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v24, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v26, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v28, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v31, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v33, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v35, v17, v19, 16 -; SI-NEXT: v_alignbit_b32 v37, v20, v21, 16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v20 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[22:23], 
v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v20 +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[19:20], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 ; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 ; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 ; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_alignbit_b32 v18, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v22, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v23, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v24, v7, v8, 16 -; SI-NEXT: v_alignbit_b32 v26, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v28, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v31, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v33, v15, v16, 16 -; 
SI-NEXT: v_alignbit_b32 v35, v17, v19, 16 -; SI-NEXT: v_alignbit_b32 v37, v20, v21, 16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v20 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[22:23], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[19:20], 16 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v18 +; SI-NEXT: 
v_lshrrev_b32_e32 v50, 16, v20 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: v_or_b32_e32 v21, v21, v37 -; SI-NEXT: buffer_store_dword v21, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 -; SI-NEXT: v_or_b32_e32 v20, v20, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v20, v21, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v31 ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 +; SI-NEXT: v_or_b32_e32 v19, v19, v26 +; SI-NEXT: buffer_store_dword v19, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v35 +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v50 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0 +; SI-NEXT: v_add_i32_e32 v20, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v30 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0 +; SI-NEXT: v_add_i32_e32 v19, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v49 +; SI-NEXT: v_or_b32_e32 v17, v17, v18 +; SI-NEXT: v_add_i32_e32 v18, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v38 +; 
SI-NEXT: v_lshlrev_b32_e32 v17, 16, v29 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 +; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v48 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v31 -; SI-NEXT: v_or_b32_e32 v14, v14, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v28 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v36 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v39 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v28 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v25 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v34 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v38 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: 
v_and_b32_e32 v10, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v26 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v27 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 ; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v32 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v37 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v24 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 +; SI-NEXT: v_or_b32_e32 v7, v7, v9 ; SI-NEXT: v_add_i32_e32 v9, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; SI-NEXT: buffer_store_dword v7, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v30 +; SI-NEXT: v_and_b32_e32 v7, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v36 ; SI-NEXT: v_or_b32_e32 v7, v7, v8 ; SI-NEXT: v_add_i32_e32 v8, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen @@ -10741,7 +10739,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -10753,45 +10751,45 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> 
inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v18 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v21 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; 
implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr18 -; SI-NEXT: ; implicit-def: $vgpr25 +; SI-NEXT: ; implicit-def: $vgpr21 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v20f32_to_v40i16_scalar: @@ -17582,240 +17580,238 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7 -; SI-NEXT: v_readfirstlane_b32 s11, v1 -; SI-NEXT: v_readfirstlane_b32 s10, v2 -; SI-NEXT: v_readfirstlane_b32 s9, v3 -; SI-NEXT: v_readfirstlane_b32 s8, v4 -; SI-NEXT: v_readfirstlane_b32 s7, v5 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v6 +; SI-NEXT: v_readfirstlane_b32 s8, v1 +; SI-NEXT: v_readfirstlane_b32 s9, v2 +; SI-NEXT: v_readfirstlane_b32 s6, v3 +; SI-NEXT: v_readfirstlane_b32 s7, v4 +; SI-NEXT: v_readfirstlane_b32 s4, v5 +; SI-NEXT: s_and_b64 s[10:11], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v6 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s28 -; SI-NEXT: v_mov_b32_e32 v5, s26 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v9, s18 -; SI-NEXT: v_mov_b32_e32 v10, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s29, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s27, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s25, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s23, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s21, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s17, v10, 16 -; SI-NEXT: s_lshr_b32 s12, s6, 16 -; SI-NEXT: s_lshr_b32 s13, s8, 16 -; SI-NEXT: s_lshr_b32 s14, s10, 16 -; SI-NEXT: s_lshr_b32 s15, s29, 16 -; SI-NEXT: 
s_lshr_b32 s40, s27, 16 -; SI-NEXT: s_lshr_b32 s41, s25, 16 -; SI-NEXT: s_lshr_b32 s42, s23, 16 -; SI-NEXT: s_lshr_b32 s43, s21, 16 -; SI-NEXT: s_lshr_b32 s44, s19, 16 -; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_lshr_b32 s72, s5, 16 +; SI-NEXT: s_lshr_b32 s73, s7, 16 +; SI-NEXT: s_lshr_b32 s74, s9, 16 +; SI-NEXT: s_lshr_b32 s75, s29, 16 +; SI-NEXT: s_lshr_b32 s76, s27, 16 +; SI-NEXT: s_lshr_b32 s77, s25, 16 +; SI-NEXT: s_lshr_b32 s78, s23, 16 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s19, 16 +; SI-NEXT: s_lshr_b32 s89, s17, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 -; SI-NEXT: s_add_u32 s28, s28, 3 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; 
SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s28 -; SI-NEXT: v_mov_b32_e32 v5, s26 -; SI-NEXT: v_mov_b32_e32 v6, s24 -; SI-NEXT: v_mov_b32_e32 v7, s22 -; SI-NEXT: v_mov_b32_e32 v8, s20 -; SI-NEXT: v_mov_b32_e32 v9, s18 -; SI-NEXT: v_mov_b32_e32 v10, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s29, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s27, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s25, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s23, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s21, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s19, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s17, v10, 16 -; SI-NEXT: s_lshr_b32 s12, s6, 16 -; SI-NEXT: s_lshr_b32 s13, s8, 16 -; SI-NEXT: s_lshr_b32 s14, s10, 16 -; SI-NEXT: s_lshr_b32 s15, s29, 16 -; SI-NEXT: s_lshr_b32 s40, s27, 16 -; SI-NEXT: s_lshr_b32 s41, s25, 16 -; SI-NEXT: s_lshr_b32 s42, s23, 16 -; SI-NEXT: s_lshr_b32 s43, s21, 16 -; SI-NEXT: s_lshr_b32 s44, s19, 16 -; SI-NEXT: s_lshr_b32 s45, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s72, s5, 16 +; SI-NEXT: s_lshr_b32 s73, s7, 16 +; SI-NEXT: s_lshr_b32 s74, s9, 16 +; SI-NEXT: s_lshr_b32 s75, s29, 16 +; SI-NEXT: s_lshr_b32 s76, s27, 16 +; SI-NEXT: s_lshr_b32 s77, s25, 16 +; SI-NEXT: s_lshr_b32 s78, s23, 16 +; SI-NEXT: s_lshr_b32 s79, s21, 16 +; SI-NEXT: s_lshr_b32 s88, s19, 16 +; SI-NEXT: s_lshr_b32 s89, s17, 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[6:7], 16 +; SI-NEXT: 
s_lshr_b64 s[14:15], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: buffer_store_dword v10, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: s_lshl_b32 s11, s60, 16 +; SI-NEXT: s_and_b32 s13, s16, 0xffff +; SI-NEXT: s_or_b32 s11, s13, s11 +; SI-NEXT: v_mov_b32_e32 v1, s11 +; SI-NEXT: s_and_b32 s11, s17, 0xffff +; SI-NEXT: s_lshl_b32 s13, s89, 16 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_lshl_b32 s11, s58, 16 +; SI-NEXT: s_and_b32 s13, s18, 0xffff +; SI-NEXT: s_or_b32 s11, s13, s11 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 16, v0 -; 
SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s19, 0xffff +; SI-NEXT: s_lshl_b32 s13, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s20, 0xffff +; SI-NEXT: s_lshl_b32 s13, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, 
vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s21, 0xffff +; SI-NEXT: s_lshl_b32 s13, s79, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s22, 0xffff +; SI-NEXT: s_lshl_b32 s13, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s23, 0xffff +; SI-NEXT: s_lshl_b32 s13, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 
-; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s13, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s24, 0xffff +; SI-NEXT: s_lshl_b32 s13, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s12, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0 +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s25, 0xffff +; SI-NEXT: s_lshl_b32 s13, s77, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s26, 0xffff +; SI-NEXT: s_lshl_b32 s13, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s27, 0xffff +; SI-NEXT: s_lshl_b32 s13, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s28, 0xffff +; SI-NEXT: s_lshl_b32 s13, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s11, s29, 0xffff +; SI-NEXT: s_lshl_b32 s13, s75, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s11, s11, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s11 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s11, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s8, s8, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s73, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s10, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x4c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; 
SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr56 +; SI-NEXT: ; implicit-def: $sgpr79 +; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr77 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr76 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr75 ; SI-NEXT: ; implicit-def: $sgpr14 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr13 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr74 ; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr73 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr72 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v10i64_to_v40i16_scalar: @@ -23902,126 +23898,126 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a, ; SI-NEXT: v_mov_b32_e32 v8, s29 ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v21, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v22, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v23, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v24, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v26, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v28, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v30, v14, v13, 16 -; SI-NEXT: 
v_alignbit_b32 v33, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v35, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v37, v20, v19, 16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v20 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[22:23], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v20 +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[19:20], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 ; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 ; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: 
v_alignbit_b32 v21, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v22, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v23, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v24, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v26, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v28, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v30, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v33, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v35, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v37, v20, v19, 16 -; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v20 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 +; SI-NEXT: v_lshr_b64 v[21:22], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_lshr_b64 v[22:23], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 +; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_lshr_b64 v[23:24], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[19:20], 16 +; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, 
v18 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v20 ; SI-NEXT: .LBB49_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v31 ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v37 -; SI-NEXT: v_or_b32_e32 v19, v19, v37 +; SI-NEXT: v_or_b32_e32 v19, v19, v26 ; SI-NEXT: buffer_store_dword v19, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v50 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v30 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v49 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v29 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v48 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v30 +; SI-NEXT: v_lshlrev_b32_e32 
v15, 16, v28 ; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v39 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v28 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v25 ; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v38 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v27 ; SI-NEXT: v_or_b32_e32 v9, v9, v11 ; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v9, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v37 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen @@ -24033,7 +24029,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a, ; SI-NEXT: buffer_store_dword v7, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v7, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v36 ; SI-NEXT: v_or_b32_e32 v7, v7, v8 ; SI-NEXT: v_add_i32_e32 v8, vcc, 
52, v0 ; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen @@ -24045,7 +24041,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v7, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -24057,7 +24053,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -24069,33 +24065,33 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr30 +; 
SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr25 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v10f64_to_v40i16_scalar: @@ -32185,338 +32181,367 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i ; SI-LABEL: bitcast_v40f16_to_v40i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v35, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v49, s17 -; SI-NEXT: 
v_cvt_f16_f32_e32 v19, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v18, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v17, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v16, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v14, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v15, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s29 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v42, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v43, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v40, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v51, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v26 +; SI-NEXT: s_waitcnt expcnt(1) 
+; SI-NEXT: v_cvt_f16_f32_e32 v58, s16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v59, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v57, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v56, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v47, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v46, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v45, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v20, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v36, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v34, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v12, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v25, s29 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v27 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v27, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v26 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v27 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v25 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_add_f32_e32 v25, 
0x38000000, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v48 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v39 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v25 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v45 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_or_b32_e32 v34, v25, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v38 -; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v23 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v32 +; 
SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_or_b32_e32 v45, v22, v17 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v54 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v25 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v57 +; SI-NEXT: v_or_b32_e32 v54, v22, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v51 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v58 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v57, v3, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v43 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: v_or_b32_e32 v58, v1, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v47 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: 
v_or_b32_e32 v43, v3, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v40 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_or_b32_e32 v47, v1, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v41 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v40, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_or_b32_e32 v41, v1, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v23 ; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v5, v5, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2 -; SI-NEXT: v_or_b32_e32 v3, v3, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v8 -; SI-NEXT: v_or_b32_e32 v9, v9, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v7 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: v_or_b32_e32 v51, v22, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v48 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_or_b32_e32 v12, v12, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; 
SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v10 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v22 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 +; SI-NEXT: v_or_b32_e32 v2, v2, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v53 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v49 +; SI-NEXT: v_or_b32_e32 v4, v4, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v52 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v21 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_or_b32_e32 v28, v28, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v25 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v33 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v53 +; 
SI-NEXT: v_or_b32_e32 v6, v6, v21 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v22 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v52 +; SI-NEXT: v_or_b32_e32 v8, v8, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v44 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v55 +; SI-NEXT: v_or_b32_e32 v10, v10, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v42 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v21 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v22 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v21 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v44 +; SI-NEXT: v_or_b32_e32 v14, v14, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v42 +; SI-NEXT: v_or_b32_e32 v18, v18, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v21, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_or_b32_e32 v33, v25, v33 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: 
v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v22 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_or_b32_e32 v35, v35, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v13 -; SI-NEXT: v_or_b32_e32 v14, v14, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v17 -; SI-NEXT: v_or_b32_e32 v20, v20, v25 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v18 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v39 -; SI-NEXT: v_or_b32_e32 v19, v19, v25 -; SI-NEXT: v_or_b32_e32 v22, v22, v26 -; SI-NEXT: v_or_b32_e32 v21, v21, v27 -; SI-NEXT: v_or_b32_e32 v16, v16, v24 -; SI-NEXT: v_or_b32_e32 v15, v15, v48 -; SI-NEXT: v_or_b32_e32 v30, v30, v38 -; SI-NEXT: v_or_b32_e32 v29, v29, v37 -; SI-NEXT: v_or_b32_e32 v11, v11, v51 -; SI-NEXT: v_or_b32_e32 v6, v6, v23 -; SI-NEXT: v_or_b32_e32 v4, v4, v52 -; SI-NEXT: v_alignbit_b32 v49, v19, v26, 16 -; SI-NEXT: v_alignbit_b32 v26, v20, v27, 16 -; SI-NEXT: v_alignbit_b32 v25, v14, v24, 16 -; SI-NEXT: v_alignbit_b32 v24, v35, v48, 16 -; SI-NEXT: v_alignbit_b32 v48, v33, v50, 16 -; SI-NEXT: v_alignbit_b32 v39, v28, v38, 16 -; SI-NEXT: v_alignbit_b32 v38, v12, v37, 16 -; SI-NEXT: v_alignbit_b32 v37, v9, v51, 16 -; SI-NEXT: v_alignbit_b32 v36, v3, v23, 16 -; SI-NEXT: v_alignbit_b32 v23, v5, v52, 16 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v21 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v56 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v46 +; SI-NEXT: v_or_b32_e32 v16, v16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v59 +; SI-NEXT: v_or_b32_e32 v12, v12, v22 +; SI-NEXT: v_or_b32_e32 v20, v20, v21 
+; SI-NEXT: v_lshr_b64 v[25:26], v[17:18], 16 +; SI-NEXT: v_or_b32_e32 v50, v23, v1 +; SI-NEXT: v_lshr_b64 v[38:39], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[23:24], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[21:22], v[1:2], 16 ; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v49 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_or_b32_e32 v22, v22, v27 -; SI-NEXT: v_or_b32_e32 v18, v19, v18 -; SI-NEXT: v_add_i32_e32 v19, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v22, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v38 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v58 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v26 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v17, v18, v17 -; SI-NEXT: v_add_i32_e32 v18, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v36 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: 
v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v25 -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v13, v14, v13 -; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v56 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v24 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v34 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v47 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v31 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v46 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v48 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v45 +; SI-NEXT: 
v_lshlrev_b32_e32 v3, 16, v25 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v32 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v42 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v39 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v43 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v10, v13, v10 -; SI-NEXT: v_add_i32_e32 v13, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v10, v13, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v44 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v38 -; SI-NEXT: v_or_b32_e32 v10, v10, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v10, v13, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v41 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v30 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 
+; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v10, v7 -; SI-NEXT: v_add_i32_e32 v10, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v37 -; SI-NEXT: v_or_b32_e32 v7, v7, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v28 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v36 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v40 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, 
v26 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v4 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v53 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51 ; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v49 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v50 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v21 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, 
s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll index ab1f8606cffd7..67c9bfe9d9f3b 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll @@ -1065,24 +1065,23 @@ define inreg <4 x i16> @bitcast_i64_to_v4i16_scalar(i64 inreg %a, i32 inreg %b) ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true ; SI-NEXT: s_add_u32 s16, s16, 3 ; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v3, s8 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: 
$vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_i64_to_v4i16_scalar: @@ -2708,38 +2707,39 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) { ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB25_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s12, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB25_3 ; SI-NEXT: .LBB25_2: ; %cmp.true ; SI-NEXT: s_add_u32 s16, s16, 3 ; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s12, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: .LBB25_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: v_mov_b32_e32 v3, s4 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v5, s14 +; SI-NEXT: v_mov_b32_e32 v6, s13 +; SI-NEXT: v_mov_b32_e32 v7, s12 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB25_4: -; SI-NEXT: ; 
implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: s_branch .LBB25_2 ; ; VI-LABEL: bitcast_i64_to_v8i8_scalar: @@ -4222,23 +4222,23 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg % ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB37_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB37_4 ; SI-NEXT: .LBB37_2: ; %cmp.true ; SI-NEXT: v_add_f64 v[4:5], s[16:17], 1.0 -; SI-NEXT: v_alignbit_b32 v1, v5, v4, 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[4:5], 16 ; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v5 ; SI-NEXT: s_branch .LBB37_5 ; SI-NEXT: .LBB37_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB37_2 ; SI-NEXT: .LBB37_4: ; SI-NEXT: v_mov_b32_e32 v5, s17 ; SI-NEXT: v_mov_b32_e32 v4, s16 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: .LBB37_5: ; %end ; SI-NEXT: v_mov_b32_e32 v0, v4 ; SI-NEXT: v_mov_b32_e32 v2, v5 @@ -5836,40 +5836,43 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b) ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB49_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s8, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s6, 
s17, 8 +; SI-NEXT: s_lshr_b32 s14, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s12, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB49_4 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[8:9], s[16:17], 1.0 -; SI-NEXT: v_alignbit_b32 v3, v9, v8, 24 -; SI-NEXT: v_alignbit_b32 v2, v9, v8, 16 -; SI-NEXT: v_alignbit_b32 v1, v9, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v9 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v9 +; SI-NEXT: v_add_f64 v[10:11], s[16:17], 1.0 +; SI-NEXT: v_lshr_b64 v[3:4], v[10:11], 24 +; SI-NEXT: v_lshr_b64 v[8:9], v[10:11], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[10:11], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v11 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v11 ; SI-NEXT: s_branch .LBB49_5 ; SI-NEXT: .LBB49_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr14 ; SI-NEXT: s_branch .LBB49_2 ; SI-NEXT: .LBB49_4: -; SI-NEXT: v_mov_b32_e32 v9, s17 -; SI-NEXT: v_mov_b32_e32 v8, s16 -; SI-NEXT: v_mov_b32_e32 v7, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v5, s6 +; SI-NEXT: v_mov_b32_e32 v11, s17 +; SI-NEXT: v_mov_b32_e32 v10, s16 +; SI-NEXT: v_mov_b32_e32 v7, s14 +; SI-NEXT: v_mov_b32_e32 v6, s13 +; SI-NEXT: v_mov_b32_e32 v5, s12 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v8, s6 +; SI-NEXT: v_mov_b32_e32 v3, s4 ; SI-NEXT: .LBB49_5: ; %end -; SI-NEXT: v_mov_b32_e32 v0, v8 -; SI-NEXT: v_mov_b32_e32 v4, v9 +; SI-NEXT: v_mov_b32_e32 v0, v10 +; SI-NEXT: v_mov_b32_e32 v2, v8 +; 
SI-NEXT: v_mov_b32_e32 v4, v11 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_f64_to_v8i8_scalar: @@ -7049,24 +7052,23 @@ define inreg <4 x i16> @bitcast_v2i32_to_v4i16_scalar(<2 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB57_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB57_3 ; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 ; SI-NEXT: .LBB57_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v3, s8 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB57_2 ; ; VI-LABEL: bitcast_v2i32_to_v4i16_scalar: @@ -8688,38 +8690,39 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB69_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s12, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: 
s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB69_3 ; SI-NEXT: .LBB69_2: ; %cmp.true -; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 +; SI-NEXT: s_lshr_b32 s12, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 8 ; SI-NEXT: .LBB69_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: v_mov_b32_e32 v3, s4 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v5, s14 +; SI-NEXT: v_mov_b32_e32 v6, s13 +; SI-NEXT: v_mov_b32_e32 v7, s12 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB69_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: s_branch .LBB69_2 ; ; VI-LABEL: bitcast_v2i32_to_v8i8_scalar: @@ -9564,24 +9567,27 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB73_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s8, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz 
.LBB73_4 ; SI-NEXT: .LBB73_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v5, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v4, s16, 1.0 +; SI-NEXT: v_lshr_b64 v[1:2], v[4:5], 16 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v5 +; SI-NEXT: s_branch .LBB73_5 ; SI-NEXT: .LBB73_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB73_2 ; SI-NEXT: .LBB73_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: v_mov_b32_e32 v5, s17 +; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v1, s4 +; SI-NEXT: .LBB73_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v4 +; SI-NEXT: v_mov_b32_e32 v2, v5 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v2f32_to_v4i16_scalar: @@ -11206,38 +11212,44 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s18, 0 ; SI-NEXT: s_cbranch_scc0 .LBB85_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s12, s17, 24 +; SI-NEXT: s_lshr_b32 s13, s17, 16 +; SI-NEXT: s_lshr_b32 s14, s17, 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB85_4 ; SI-NEXT: .LBB85_2: ; %cmp.true -; SI-NEXT: v_add_f32_e64 v4, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: 
v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v4 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v11, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v10, s16, 1.0 +; SI-NEXT: v_lshr_b64 v[3:4], v[10:11], 24 +; SI-NEXT: v_lshr_b64 v[8:9], v[10:11], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[10:11], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v11 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v11 +; SI-NEXT: s_branch .LBB85_5 ; SI-NEXT: .LBB85_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr13 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: s_branch .LBB85_2 ; SI-NEXT: .LBB85_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v10, s16 +; SI-NEXT: v_mov_b32_e32 v11, s17 +; SI-NEXT: v_mov_b32_e32 v5, s14 +; SI-NEXT: v_mov_b32_e32 v6, s13 +; SI-NEXT: v_mov_b32_e32 v7, s12 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: v_mov_b32_e32 v8, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: .LBB85_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v10 +; SI-NEXT: v_mov_b32_e32 v2, v8 +; SI-NEXT: v_mov_b32_e32 v4, v11 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v2f32_to_v8i8_scalar: @@ -12327,7 +12339,7 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v4, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: s_cmp_lg_u32 
s20, 0 @@ -12336,23 +12348,24 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i ; SI-NEXT: s_cbranch_execnz .LBB91_3 ; SI-NEXT: .LBB91_2: ; %cmp.true ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v2, v2, v4 +; SI-NEXT: v_lshr_b64 v[4:5], v[1:2], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 ; SI-NEXT: .LBB91_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v4 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB91_4: ; SI-NEXT: s_branch .LBB91_2 @@ -12938,34 +12951,35 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3 ; SI-NEXT: v_mul_f32_e64 v7, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v6, 1.0, s17 ; SI-NEXT: v_mul_f32_e64 v5, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v4, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 ; SI-NEXT: s_cbranch_scc0 .LBB95_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v6 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v5 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: s_cbranch_execnz .LBB95_3 ; SI-NEXT: .LBB95_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v6 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v7 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 
0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v4 -; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v6 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v5 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v7 ; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 +; SI-NEXT: v_lshr_b64 v[4:5], v[1:2], 16 +; SI-NEXT: v_alignbit_b32 v0, v6, v0, 16 ; SI-NEXT: .LBB95_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v4 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB95_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr4 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: s_branch .LBB95_2 @@ -13395,52 +13409,52 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v3, s7, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s7, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 8 -; SI-NEXT: s_lshr_b32 s9, s7, 8 -; SI-NEXT: s_and_b32 s10, s19, 0xffff -; SI-NEXT: s_bfe_u32 s8, s19, 0x80008 +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 +; SI-NEXT: s_or_b32 s5, s5, s6 +; 
SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s9, s5, 8 +; SI-NEXT: s_and_b32 s11, s19, 0xffff +; SI-NEXT: s_bfe_u32 s7, s19, 0x80008 ; SI-NEXT: s_cbranch_execnz .LBB97_3 ; SI-NEXT: .LBB97_2: ; %cmp.true ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v3, s7, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s7, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 8 -; SI-NEXT: s_lshr_b32 s8, s7, 24 -; SI-NEXT: s_lshr_b32 s10, s7, 16 -; SI-NEXT: s_lshr_b32 s9, s7, 8 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s7, s5, 24 +; SI-NEXT: s_lshr_b32 s11, s5, 16 +; SI-NEXT: s_lshr_b32 s9, s5, 8 ; SI-NEXT: .LBB97_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_mov_b32_e32 v4, s7 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s10 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v4, s5 ; SI-NEXT: v_mov_b32_e32 v5, s9 -; SI-NEXT: v_mov_b32_e32 v6, s10 -; SI-NEXT: v_mov_b32_e32 v7, s8 +; SI-NEXT: v_mov_b32_e32 v6, s11 +; SI-NEXT: v_mov_b32_e32 v7, s7 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB97_4: -; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 -; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; 
implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: ; implicit-def: $sgpr8 +; SI-NEXT: ; implicit-def: $sgpr6 +; SI-NEXT: ; implicit-def: $sgpr9 +; SI-NEXT: ; implicit-def: $sgpr11 +; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: s_branch .LBB97_2 ; ; VI-LABEL: bitcast_v4i16_to_v8i8_scalar: @@ -13970,27 +13984,27 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre ; SI-NEXT: s_cmp_lg_u32 s24, 0 ; SI-NEXT: s_cbranch_scc0 .LBB99_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s23, 24 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 ; SI-NEXT: s_lshl_b32 s6, s19, 24 -; SI-NEXT: s_or_b32 s4, s6, s4 -; SI-NEXT: s_and_b32 s6, s16, 0xff -; SI-NEXT: s_lshl_b32 s8, s17, 8 -; SI-NEXT: s_or_b32 s6, s6, s8 -; SI-NEXT: v_mov_b32_e32 v0, s4 -; SI-NEXT: s_and_b32 s6, s6, 0xffff -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 16 -; SI-NEXT: s_or_b32 s6, s6, s4 -; SI-NEXT: s_lshr_b32 s8, s5, 16 +; SI-NEXT: s_or_b32 s10, s6, s5 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s6, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s6, s22, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s23, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s8, s7, s6 +; SI-NEXT: s_or_b32 s11, s5, s8 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshr_b64 s[6:7], s[10:11], 16 +; SI-NEXT: s_or_b32 s4, s4, s10 +; SI-NEXT: s_lshr_b32 s7, s8, 16 +; SI-NEXT: s_mov_b32 s5, s11 ; SI-NEXT: s_cbranch_execnz .LBB99_3 ; SI-NEXT: .LBB99_2: ; %cmp.true ; SI-NEXT: s_add_i32 s16, s16, 3 @@ 
-14004,34 +14018,33 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre ; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 -; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s6, s21, 8 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_and_b32 s7, s22, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s23, 24 ; SI-NEXT: s_lshl_b32 s7, s7, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s7 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s7, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 16 -; SI-NEXT: s_lshr_b32 s8, s7, 16 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b32 s7, s5, 16 ; SI-NEXT: .LBB99_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_mov_b32_e32 v2, s7 -; SI-NEXT: v_mov_b32_e32 v3, s8 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s6 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: v_mov_b32_e32 v3, s7 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB99_4: +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: s_branch .LBB99_2 ; ; VI-LABEL: bitcast_v8i8_to_v4i16_scalar: @@ -15220,53 +15233,55 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr ; SI-LABEL: bitcast_v4f16_to_v8i8_scalar: ; 
SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v10, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v9, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v8, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v8, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v0, s18 ; SI-NEXT: s_cmp_lg_u32 s20, 0 ; SI-NEXT: s_cbranch_scc0 .LBB105_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v10 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v11 +; SI-NEXT: v_or_b32_e32 v9, v8, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v0, v9, v0 -; SI-NEXT: v_or_b32_e32 v4, v8, v1 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_or_b32_e32 v10, v0, v1 +; SI-NEXT: v_lshr_b64 v[3:4], v[9:10], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[9:10], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v10 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: s_cbranch_execnz .LBB105_3 ; SI-NEXT: .LBB105_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v0, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v8 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v2 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v3 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, 
v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v9, v2, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: v_or_b32_e32 v4, v2, v1 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_or_b32_e32 v10, v0, v1 +; SI-NEXT: v_lshr_b64 v[3:4], v[9:10], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[9:10], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v10 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: .LBB105_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v9 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v10 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB105_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: s_branch .LBB105_2 @@ -16420,48 +16435,50 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_cmp_lg_u32 s20, 0 -; SI-NEXT: v_mul_f32_e64 v10, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v11, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v8, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v9, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v11, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v12, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v0, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v8, 1.0, s18 ; SI-NEXT: s_cbranch_scc0 .LBB109_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v8 -; SI-NEXT: v_alignbit_b32 v0, v0, v11, 16 -; SI-NEXT: v_alignbit_b32 v4, v6, v9, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 
24, v8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v11 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; SI-NEXT: v_alignbit_b32 v9, v1, v12, 16 +; SI-NEXT: v_alignbit_b32 v10, v6, v8, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[9:10], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[9:10], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v0 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v10 ; SI-NEXT: s_cbranch_execnz .LBB109_3 ; SI-NEXT: .LBB109_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v10 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v11 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v11 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v12 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; SI-NEXT: v_alignbit_b32 v9, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v8 -; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v7 -; SI-NEXT: v_alignbit_b32 v4, v6, v1, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v7 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; SI-NEXT: v_alignbit_b32 v10, v6, v1, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[9:10], 24 +; SI-NEXT: v_lshr_b64 v[4:5], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[9:10], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v10 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v0 ; SI-NEXT: .LBB109_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v9 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v10 ; SI-NEXT: s_setpc_b64 s[30:31] ; 
SI-NEXT: .LBB109_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr9 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll index 9f5c9c4c509ed..2cc7c448b2e11 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll @@ -3022,264 +3022,260 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9 -; SI-NEXT: v_readfirstlane_b32 s13, v1 -; SI-NEXT: v_readfirstlane_b32 s12, v2 -; SI-NEXT: v_readfirstlane_b32 s11, v3 -; SI-NEXT: v_readfirstlane_b32 s10, v4 -; SI-NEXT: v_readfirstlane_b32 s9, v5 -; SI-NEXT: v_readfirstlane_b32 s8, v6 -; SI-NEXT: v_readfirstlane_b32 s7, v7 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v8 +; SI-NEXT: v_readfirstlane_b32 s10, v1 +; SI-NEXT: v_readfirstlane_b32 s11, v2 +; SI-NEXT: v_readfirstlane_b32 s8, v3 +; SI-NEXT: v_readfirstlane_b32 s9, v4 +; SI-NEXT: v_readfirstlane_b32 s6, v5 +; SI-NEXT: v_readfirstlane_b32 s7, v6 +; SI-NEXT: v_readfirstlane_b32 s4, v7 +; SI-NEXT: s_and_b64 s[12:13], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v8 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s28 -; SI-NEXT: v_mov_b32_e32 v6, s26 -; SI-NEXT: v_mov_b32_e32 v7, s24 -; SI-NEXT: v_mov_b32_e32 v8, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v10, s18 -; SI-NEXT: v_mov_b32_e32 v11, s16 
-; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s29, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s27, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s25, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s23, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s19, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s17, v11, 16 -; SI-NEXT: s_lshr_b32 s14, s6, 16 -; SI-NEXT: s_lshr_b32 s15, s8, 16 -; SI-NEXT: s_lshr_b32 s40, s10, 16 -; SI-NEXT: s_lshr_b32 s41, s12, 16 -; SI-NEXT: s_lshr_b32 s42, s29, 16 -; SI-NEXT: s_lshr_b32 s43, s27, 16 -; SI-NEXT: s_lshr_b32 s44, s25, 16 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s21, 16 -; SI-NEXT: s_lshr_b32 s47, s19, 16 -; SI-NEXT: s_lshr_b32 s56, s17, 16 +; SI-NEXT: s_lshr_b32 s76, s5, 16 +; SI-NEXT: s_lshr_b32 s77, s7, 16 +; SI-NEXT: s_lshr_b32 s78, s9, 16 +; SI-NEXT: s_lshr_b32 s79, s11, 16 +; SI-NEXT: s_lshr_b32 s88, s29, 16 +; SI-NEXT: s_lshr_b32 s89, s27, 16 +; SI-NEXT: s_lshr_b32 s90, s25, 16 +; SI-NEXT: s_lshr_b32 s91, s23, 16 +; SI-NEXT: s_lshr_b32 s92, s21, 16 +; SI-NEXT: s_lshr_b32 s93, s19, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 
; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 ; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s12, s12, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s28 -; SI-NEXT: v_mov_b32_e32 v6, s26 -; SI-NEXT: v_mov_b32_e32 v7, s24 -; SI-NEXT: v_mov_b32_e32 v8, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v10, s18 -; SI-NEXT: v_mov_b32_e32 v11, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s29, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s27, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s25, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s23, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s19, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s17, v11, 16 -; SI-NEXT: s_lshr_b32 s14, s6, 16 -; SI-NEXT: s_lshr_b32 s15, s8, 16 -; SI-NEXT: s_lshr_b32 s40, s10, 16 -; SI-NEXT: s_lshr_b32 s41, s12, 16 -; SI-NEXT: s_lshr_b32 s42, s29, 16 -; SI-NEXT: s_lshr_b32 s43, s27, 16 -; SI-NEXT: s_lshr_b32 s44, s25, 16 -; SI-NEXT: s_lshr_b32 s45, s23, 16 
-; SI-NEXT: s_lshr_b32 s46, s21, 16 -; SI-NEXT: s_lshr_b32 s47, s19, 16 -; SI-NEXT: s_lshr_b32 s56, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b32 s76, s5, 16 +; SI-NEXT: s_lshr_b32 s77, s7, 16 +; SI-NEXT: s_lshr_b32 s78, s9, 16 +; SI-NEXT: s_lshr_b32 s79, s11, 16 +; SI-NEXT: s_lshr_b32 s88, s29, 16 +; SI-NEXT: s_lshr_b32 s89, s27, 16 +; SI-NEXT: s_lshr_b32 s90, s25, 16 +; SI-NEXT: s_lshr_b32 s91, s23, 16 +; SI-NEXT: s_lshr_b32 s92, s21, 16 +; SI-NEXT: s_lshr_b32 s93, s19, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: buffer_store_dword v11, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: s_lshl_b32 s13, s72, 16 +; SI-NEXT: s_and_b32 s15, s16, 0xffff +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: v_mov_b32_e32 v1, s13 +; SI-NEXT: s_and_b32 s13, s17, 0xffff +; SI-NEXT: s_lshl_b32 s15, s94, 16 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_lshl_b32 s13, s62, 16 +; SI-NEXT: s_and_b32 s15, s18, 0xffff +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: v_mov_b32_e32 v3, s13 +; SI-NEXT: s_and_b32 s13, s19, 0xffff +; SI-NEXT: s_lshl_b32 s15, s93, 16 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: v_mov_b32_e32 v4, s13 +; SI-NEXT: s_lshl_b32 s13, s60, 16 +; SI-NEXT: s_and_b32 s15, s20, 0xffff +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: 
s_and_b32 s13, s21, 0xffff +; SI-NEXT: s_lshl_b32 s15, s92, 16 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s22, 0xffff +; SI-NEXT: s_lshl_b32 s15, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s23, 0xffff +; SI-NEXT: s_lshl_b32 s15, s91, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 
-; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s24, 0xffff +; SI-NEXT: s_lshl_b32 s15, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s25, 0xffff +; SI-NEXT: s_lshl_b32 s15, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s26, 0xffff +; SI-NEXT: s_lshl_b32 s15, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; 
SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s27, 0xffff +; SI-NEXT: s_lshl_b32 s15, s89, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s28, 0xffff +; SI-NEXT: s_lshl_b32 s15, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s29, 0xffff +; SI-NEXT: s_lshl_b32 s15, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s13, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s10, s10, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s79, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 
s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s77, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x54, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: 
$sgpr93 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr90 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr88 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr79 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr76 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v22i32_to_v44i16_scalar: @@ -11524,171 +11520,171 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9 -; SI-NEXT: v_mov_b32_e32 v23, s16 +; SI-NEXT: v_mov_b32_e32 v21, s16 ; SI-NEXT: v_mov_b32_e32 v22, s17 -; SI-NEXT: v_mov_b32_e32 v21, s18 -; SI-NEXT: v_mov_b32_e32 v19, s19 -; SI-NEXT: v_mov_b32_e32 v18, s20 -; SI-NEXT: v_mov_b32_e32 v17, s21 -; SI-NEXT: v_mov_b32_e32 v16, s22 -; SI-NEXT: v_mov_b32_e32 v15, s23 -; SI-NEXT: v_mov_b32_e32 v14, s24 -; SI-NEXT: v_mov_b32_e32 v12, s25 -; SI-NEXT: v_mov_b32_e32 v13, s26 +; SI-NEXT: v_mov_b32_e32 v19, s18 +; SI-NEXT: v_mov_b32_e32 v20, s19 +; SI-NEXT: v_mov_b32_e32 v17, s20 +; 
SI-NEXT: v_mov_b32_e32 v18, s21 +; SI-NEXT: v_mov_b32_e32 v15, s22 +; SI-NEXT: v_mov_b32_e32 v16, s23 +; SI-NEXT: v_mov_b32_e32 v13, s24 +; SI-NEXT: v_mov_b32_e32 v14, s25 +; SI-NEXT: v_mov_b32_e32 v11, s26 +; SI-NEXT: v_mov_b32_e32 v12, s27 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v11, s27 -; SI-NEXT: v_mov_b32_e32 v10, s28 -; SI-NEXT: v_mov_b32_e32 v9, s29 +; SI-NEXT: v_mov_b32_e32 v9, s28 +; SI-NEXT: v_mov_b32_e32 v10, s29 ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v20, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v24, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v25, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v26, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v28, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v30, v11, v13, 16 -; SI-NEXT: v_alignbit_b32 v32, v12, v14, 16 -; SI-NEXT: v_alignbit_b32 v34, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v37, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v39, v19, v21, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v23, 16 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v22 +; SI-NEXT: v_lshr_b64 v[23:24], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[19:20], 16 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 
v36, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v22 +; SI-NEXT: v_lshr_b64 v[33:34], v[21:22], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 +; SI-NEXT: v_lshr_b64 v[23:24], v[7:8], 16 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[24:25], v[5:6], 16 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 16 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_lshr_b64 v[26:27], v[1:2], 16 ; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 ; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 ; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: 
v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_alignbit_b32 v20, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v24, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v25, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v26, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v28, v9, v10, 16 -; SI-NEXT: v_alignbit_b32 v30, v11, v13, 16 -; SI-NEXT: v_alignbit_b32 v32, v12, v14, 16 -; SI-NEXT: v_alignbit_b32 v34, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v37, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v39, v19, v21, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v23, 16 -; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v9 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v22 +; SI-NEXT: v_lshr_b64 v[28:29], v[11:12], 16 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_lshr_b64 v[30:31], v[15:16], 16 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_lshr_b64 v[31:32], v[17:18], 16 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_lshr_b64 v[32:33], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[21:22], 16 +; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, 
v20 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v22 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v49 -; SI-NEXT: v_or_b32_e32 v23, v23, v49 -; SI-NEXT: buffer_store_dword v23, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v33 ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: v_or_b32_e32 v21, v21, v33 +; SI-NEXT: buffer_store_dword v21, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v39 +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v53 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 8, v0 +; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v32 +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 12, v0 +; SI-NEXT: v_add_i32_e32 v21, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v37 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v52 +; SI-NEXT: v_or_b32_e32 v19, v19, v20 +; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v31 ; SI-NEXT: v_and_b32_e32 v17, 
0xffff, v17 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 +; SI-NEXT: v_add_i32_e32 v19, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v50 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v51 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v34 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v30 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v48 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v50 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v32 -; SI-NEXT: v_or_b32_e32 v14, v14, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v29 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 +; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v38 -; SI-NEXT: v_or_b32_e32 v12, v12, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v49 +; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0 -; SI-NEXT: 
buffer_store_dword v12, v14, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v30 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v28 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v36 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v48 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v28 -; SI-NEXT: v_or_b32_e32 v10, v10, v11 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v27 +; SI-NEXT: v_or_b32_e32 v9, v9, v11 ; SI-NEXT: v_add_i32_e32 v11, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v35 +; SI-NEXT: v_and_b32_e32 v9, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v39 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen @@ -11700,7 +11696,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; 
SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -11712,7 +11708,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -11724,47 +11720,47 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v20 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v23 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; 
implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr29 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr20 -; SI-NEXT: ; implicit-def: $vgpr27 +; SI-NEXT: ; implicit-def: $vgpr23 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v22f32_to_v44i16_scalar: @@ -19266,264 +19262,260 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9 -; SI-NEXT: v_readfirstlane_b32 s13, v1 -; SI-NEXT: v_readfirstlane_b32 s12, v2 -; SI-NEXT: v_readfirstlane_b32 s11, v3 -; SI-NEXT: v_readfirstlane_b32 s10, v4 -; SI-NEXT: v_readfirstlane_b32 s9, v5 -; SI-NEXT: v_readfirstlane_b32 s8, v6 -; SI-NEXT: v_readfirstlane_b32 s7, v7 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v8 +; SI-NEXT: v_readfirstlane_b32 s10, v1 +; SI-NEXT: v_readfirstlane_b32 s11, v2 +; SI-NEXT: v_readfirstlane_b32 s8, v3 +; SI-NEXT: v_readfirstlane_b32 s9, v4 +; SI-NEXT: v_readfirstlane_b32 s6, v5 +; SI-NEXT: v_readfirstlane_b32 s7, v6 +; SI-NEXT: v_readfirstlane_b32 s4, v7 +; SI-NEXT: s_and_b64 s[12:13], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v8 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: 
v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s28 -; SI-NEXT: v_mov_b32_e32 v6, s26 -; SI-NEXT: v_mov_b32_e32 v7, s24 -; SI-NEXT: v_mov_b32_e32 v8, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v10, s18 -; SI-NEXT: v_mov_b32_e32 v11, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s29, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s27, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s25, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s23, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s19, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s17, v11, 16 -; SI-NEXT: s_lshr_b32 s14, s6, 16 -; SI-NEXT: s_lshr_b32 s15, s8, 16 -; SI-NEXT: s_lshr_b32 s40, s10, 16 -; SI-NEXT: s_lshr_b32 s41, s12, 16 -; SI-NEXT: s_lshr_b32 s42, s29, 16 -; SI-NEXT: s_lshr_b32 s43, s27, 16 -; SI-NEXT: s_lshr_b32 s44, s25, 16 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s21, 16 -; SI-NEXT: s_lshr_b32 s47, s19, 16 -; SI-NEXT: s_lshr_b32 s56, s17, 16 +; SI-NEXT: s_lshr_b32 s76, s5, 16 +; SI-NEXT: s_lshr_b32 s77, s7, 16 +; SI-NEXT: s_lshr_b32 s78, s9, 16 +; SI-NEXT: s_lshr_b32 s79, s11, 16 +; SI-NEXT: s_lshr_b32 s88, s29, 16 +; SI-NEXT: s_lshr_b32 s89, s27, 16 +; SI-NEXT: s_lshr_b32 s90, s25, 16 +; SI-NEXT: s_lshr_b32 s91, s23, 16 +; SI-NEXT: s_lshr_b32 s92, s21, 16 +; SI-NEXT: s_lshr_b32 s93, s19, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16 
+; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s28 -; SI-NEXT: v_mov_b32_e32 v6, s26 -; SI-NEXT: v_mov_b32_e32 v7, s24 -; SI-NEXT: v_mov_b32_e32 v8, s22 -; SI-NEXT: v_mov_b32_e32 v9, s20 -; SI-NEXT: v_mov_b32_e32 v10, s18 -; SI-NEXT: v_mov_b32_e32 v11, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s29, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s27, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s25, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s23, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s21, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s19, v10, 16 -; SI-NEXT: 
v_alignbit_b32 v11, s17, v11, 16 -; SI-NEXT: s_lshr_b32 s14, s6, 16 -; SI-NEXT: s_lshr_b32 s15, s8, 16 -; SI-NEXT: s_lshr_b32 s40, s10, 16 -; SI-NEXT: s_lshr_b32 s41, s12, 16 -; SI-NEXT: s_lshr_b32 s42, s29, 16 -; SI-NEXT: s_lshr_b32 s43, s27, 16 -; SI-NEXT: s_lshr_b32 s44, s25, 16 -; SI-NEXT: s_lshr_b32 s45, s23, 16 -; SI-NEXT: s_lshr_b32 s46, s21, 16 -; SI-NEXT: s_lshr_b32 s47, s19, 16 -; SI-NEXT: s_lshr_b32 s56, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s76, s5, 16 +; SI-NEXT: s_lshr_b32 s77, s7, 16 +; SI-NEXT: s_lshr_b32 s78, s9, 16 +; SI-NEXT: s_lshr_b32 s79, s11, 16 +; SI-NEXT: s_lshr_b32 s88, s29, 16 +; SI-NEXT: s_lshr_b32 s89, s27, 16 +; SI-NEXT: s_lshr_b32 s90, s25, 16 +; SI-NEXT: s_lshr_b32 s91, s23, 16 +; SI-NEXT: s_lshr_b32 s92, s21, 16 +; SI-NEXT: s_lshr_b32 s93, s19, 16 +; SI-NEXT: s_lshr_b32 s94, s17, 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: 
v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: buffer_store_dword v11, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: s_lshl_b32 s13, s72, 16 +; SI-NEXT: s_and_b32 s15, s16, 0xffff +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: v_mov_b32_e32 v1, s13 +; SI-NEXT: s_and_b32 s13, s17, 0xffff +; SI-NEXT: s_lshl_b32 s15, s94, 16 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_lshl_b32 s13, s62, 16 +; SI-NEXT: s_and_b32 s15, s18, 0xffff +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: v_mov_b32_e32 v3, s13 +; SI-NEXT: s_and_b32 s13, s19, 0xffff +; SI-NEXT: s_lshl_b32 s15, s93, 16 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: v_mov_b32_e32 v4, s13 +; SI-NEXT: s_lshl_b32 s13, s60, 16 +; SI-NEXT: s_and_b32 s15, s20, 0xffff +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: 
s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s13, s15, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s21, 0xffff +; SI-NEXT: s_lshl_b32 s15, s92, 16 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s22, 0xffff +; SI-NEXT: s_lshl_b32 s15, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 
offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s23, 0xffff +; SI-NEXT: s_lshl_b32 s15, s91, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s24, 0xffff +; SI-NEXT: s_lshl_b32 s15, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s25, 0xffff +; SI-NEXT: s_lshl_b32 s15, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: 
buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s26, 0xffff +; SI-NEXT: s_lshl_b32 s15, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s15, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s27, 0xffff +; SI-NEXT: s_lshl_b32 s15, s89, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s14, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s28, 0xffff +; SI-NEXT: s_lshl_b32 s15, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s13, s29, 0xffff +; SI-NEXT: s_lshl_b32 s15, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s13, s13, s15 +; SI-NEXT: 
buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s13 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s13, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s10, s10, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s79, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s77, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s12, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; 
SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x54, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr58 +; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr90 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr89 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr88 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr79 ; SI-NEXT: ; implicit-def: $sgpr40 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr15 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr78 ; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr77 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr76 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v11i64_to_v44i16_scalar: @@ -26272,131 +26264,131 @@ define inreg <44 x i16> 
@bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: v_mov_b32_e32 v10, s29 ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v23, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v24, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v25, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v26, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v27, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v32, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v34, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v36, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v39, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v21, 16 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v22 +; SI-NEXT: v_lshr_b64 v[23:24], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[19:20], 16 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v22 +; SI-NEXT: 
v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[21:22], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 ; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 ; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_alignbit_b32 v23, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v24, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v25, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v26, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v27, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v32, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v34, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v36, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v39, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v21, 16 -; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v22 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_lshr_b64 v[23:24], v[7:8], 16 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 +; SI-NEXT: v_lshr_b64 v[24:25], v[5:6], 16 +; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_lshr_b64 v[25:26], v[3:4], 16 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; 
SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_lshr_b64 v[26:27], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[11:12], 16 +; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_lshr_b64 v[27:28], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[21:22], 16 +; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v22 ; SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v34 ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_or_b32_e32 v21, v21, v49 +; SI-NEXT: v_or_b32_e32 v21, v21, v30 ; SI-NEXT: buffer_store_dword v21, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v54 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v33 +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 
16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v53 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v32 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v52 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v29 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v51 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v28 ; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v50 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 
offen ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v29 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v31 ; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v49 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen @@ -26408,7 +26400,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: buffer_store_dword v9, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v9, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v48 ; SI-NEXT: v_or_b32_e32 v9, v9, v10 ; SI-NEXT: v_add_i32_e32 v10, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen @@ -26420,7 +26412,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v9, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -26432,7 +26424,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -26444,7 +26436,7 @@ define inreg <44 x i16> 
@bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -26456,35 +26448,35 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v28 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr48 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr31 ; SI-NEXT: ; 
implicit-def: $vgpr24 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr28 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v11f64_to_v44i16_scalar: @@ -35515,368 +35507,413 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i ; SI-LABEL: bitcast_v44f16_to_v44i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v20 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: 
buffer_store_dword v56, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_cvt_f16_f32_e32 v57, v2 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_cvt_f16_f32_e32 v58, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v43, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v35, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v40, s16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v63, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v54, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v61, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v62, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v59, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v60, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v20 
; SI-NEXT: v_cvt_f16_f32_e32 v6, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v27, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v52, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v30, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v29, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v18, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v28, s29 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v50, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v26, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v48, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v29, s29 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true +; SI-NEXT: v_cvt_f32_f16_e32 v5, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: 
v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v63 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_or_b32_e32 v26, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v61 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v24 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v5 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_or_b32_e32 v1, v1, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v4 -; SI-NEXT: v_cvt_f16_f32_e32 
v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_or_b32_e32 v3, v3, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v6, v6, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v61 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v24, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v16 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v16, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v57 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v22 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v29 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: 
v_cvt_f16_f32_e32 v57, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v57 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v22, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v47 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v18 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v36 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v47 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v18, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v14 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v44 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v14, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v42 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v10 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v32 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v42 +; SI-NEXT: v_or_b32_e32 v10, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v8 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v30 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; 
SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v41 +; SI-NEXT: v_or_b32_e32 v8, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v55 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v27 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v55 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_or_b32_e32 v6, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v53 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v19 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v4 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_or_b32_e32 v4, v3, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v52 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v33 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v11 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 ; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_or_b32_e32 v9, v9, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v52 +; SI-NEXT: v_or_b32_e32 v2, v2, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v40 ; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_or_b32_e32 v13, v13, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_or_b32_e32 v31, v31, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v52, 0x38000000, v52 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_or_b32_e32 v54, v12, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v58 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_or_b32_e32 v60, v19, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v46 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_or_b32_e32 v34, v34, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v52 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; 
SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v51 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_or_b32_e32 v40, v11, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v62 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_or_b32_e32 v58, v12, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v45 +; SI-NEXT: v_or_b32_e32 v46, v19, v9 +; SI-NEXT: v_or_b32_e32 v62, v11, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v56 +; SI-NEXT: v_cvt_f32_f16_e32 v19, v35 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_or_b32_e32 v56, v11, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v43 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_or_b32_e32 v37, v37, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: 
v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 ; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_or_b32_e32 v18, v18, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v22 -; SI-NEXT: v_or_b32_e32 v21, v21, v53 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v25 -; SI-NEXT: v_or_b32_e32 v24, v24, v53 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v52 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v51 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v50 -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v49 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v48 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_or_b32_e32 v27, v27, v52 -; SI-NEXT: v_or_b32_e32 v26, v26, v30 -; SI-NEXT: v_or_b32_e32 v23, v23, v29 -; SI-NEXT: v_or_b32_e32 v20, v20, v28 -; SI-NEXT: v_or_b32_e32 v39, v39, v51 -; SI-NEXT: v_or_b32_e32 v36, v36, v50 -; SI-NEXT: v_or_b32_e32 v33, v33, v49 -; SI-NEXT: v_or_b32_e32 v15, v15, v48 -; SI-NEXT: v_or_b32_e32 v11, v11, v17 -; SI-NEXT: v_or_b32_e32 v8, v8, v16 -; SI-NEXT: v_or_b32_e32 v5, v5, v12 -; SI-NEXT: v_alignbit_b32 v52, v24, v52, 16 -; SI-NEXT: v_alignbit_b32 v30, v21, v30, 16 -; SI-NEXT: v_alignbit_b32 v29, v18, v29, 16 -; SI-NEXT: v_alignbit_b32 v28, v37, v28, 16 -; SI-NEXT: v_alignbit_b32 v51, v34, v51, 16 -; SI-NEXT: v_alignbit_b32 v50, v31, v50, 16 -; SI-NEXT: v_alignbit_b32 v49, v13, v49, 16 -; SI-NEXT: v_alignbit_b32 v48, v9, v48, 16 -; SI-NEXT: v_alignbit_b32 v17, v6, v17, 16 -; SI-NEXT: v_alignbit_b32 v16, v3, v16, 16 -; SI-NEXT: v_alignbit_b32 v12, v1, v12, 16 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v45, v12, v7 +; SI-NEXT: v_or_b32_e32 v12, v19, v3 +; SI-NEXT: v_or_b32_e32 v43, v11, v5 +; SI-NEXT: v_or_b32_e32 
v11, v20, v1 +; SI-NEXT: v_lshr_b64 v[29:30], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_mov_b32_e32 v35, v12 +; SI-NEXT: v_mov_b32_e32 v33, v11 +; SI-NEXT: v_lshr_b64 v[30:31], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[19:20], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[11:12], v[1:2], 16 ; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v52 -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 -; SI-NEXT: v_or_b32_e32 v27, v27, v52 -; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v30 -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 8, v0 -; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v50 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v40 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v29 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; 
SI-NEXT: v_add_i32_e32 v19, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v28 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v38 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v39 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v51 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v35 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v50 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v32 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v63 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v33 -; SI-NEXT: 
v_lshlrev_b32_e32 v19, 16, v49 -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 48, v0 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v48 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v54 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v48 -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v13, v13, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 56, v0 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v9, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v17 -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v9, v9, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 64, v0 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v38 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v62 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v16 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x48, v0 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v12 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x50, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v29 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v60 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v57 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v58 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v36 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v56 +; 
SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v44 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v46 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v42 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v45 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v30 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v41 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v43 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v27 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 +; 
SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v53 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v33 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; 
SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll index d7faaa68732d2..c35e183fa787f 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll @@ -3189,289 +3189,301 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v24i32_to_v48i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v12, s30, 0 +; SI-NEXT: v_writelane_b32 v12, s31, 1 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11 -; SI-NEXT: v_readfirstlane_b32 s15, v1 -; SI-NEXT: v_readfirstlane_b32 s14, v2 -; SI-NEXT: v_readfirstlane_b32 s13, v3 -; SI-NEXT: v_readfirstlane_b32 s12, v4 -; SI-NEXT: v_readfirstlane_b32 s11, v5 -; SI-NEXT: v_readfirstlane_b32 s10, v6 -; SI-NEXT: v_readfirstlane_b32 s9, v7 -; SI-NEXT: v_readfirstlane_b32 s8, v8 -; SI-NEXT: v_readfirstlane_b32 s7, v9 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v10 +; SI-NEXT: v_writelane_b32 v12, s34, 2 +; SI-NEXT: v_readfirstlane_b32 s12, v1 +; SI-NEXT: v_readfirstlane_b32 s13, v2 +; SI-NEXT: v_readfirstlane_b32 s10, v3 +; SI-NEXT: v_readfirstlane_b32 s11, v4 +; SI-NEXT: v_readfirstlane_b32 s8, v5 +; SI-NEXT: v_readfirstlane_b32 s9, v6 +; SI-NEXT: v_readfirstlane_b32 s6, v7 +; SI-NEXT: 
v_readfirstlane_b32 s7, v8 +; SI-NEXT: v_readfirstlane_b32 s4, v9 +; SI-NEXT: s_and_b64 s[14:15], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v10 +; SI-NEXT: v_writelane_b32 v12, s35, 3 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v7, s26 -; SI-NEXT: v_mov_b32_e32 v8, s24 -; SI-NEXT: v_mov_b32_e32 v9, s22 -; SI-NEXT: v_mov_b32_e32 v10, s20 -; SI-NEXT: v_mov_b32_e32 v11, s18 -; SI-NEXT: v_mov_b32_e32 v12, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s27, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s25, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s23, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s21, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s19, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s17, v12, 16 -; SI-NEXT: s_lshr_b32 s40, s6, 16 -; SI-NEXT: s_lshr_b32 s41, s8, 16 -; SI-NEXT: s_lshr_b32 s42, s10, 16 -; SI-NEXT: s_lshr_b32 s43, s12, 16 -; SI-NEXT: s_lshr_b32 s44, s14, 16 -; SI-NEXT: s_lshr_b32 s45, s29, 16 -; SI-NEXT: s_lshr_b32 s46, s27, 16 -; SI-NEXT: s_lshr_b32 s47, s25, 16 -; SI-NEXT: s_lshr_b32 s56, s23, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 16 -; SI-NEXT: s_lshr_b32 s58, s19, 16 -; SI-NEXT: s_lshr_b32 s59, s17, 16 +; SI-NEXT: s_lshr_b32 s88, s5, 16 +; SI-NEXT: s_lshr_b32 s89, s7, 16 +; SI-NEXT: s_lshr_b32 s90, s9, 16 +; SI-NEXT: s_lshr_b32 s91, s11, 16 +; SI-NEXT: s_lshr_b32 s92, s13, 16 +; SI-NEXT: s_lshr_b32 s93, s29, 16 +; SI-NEXT: s_lshr_b32 s94, s27, 16 +; SI-NEXT: s_lshr_b32 s95, s25, 16 +; SI-NEXT: s_lshr_b32 s30, s23, 16 +; SI-NEXT: s_lshr_b32 s31, s21, 16 +; SI-NEXT: s_lshr_b32 s34, 
s19, 16 +; SI-NEXT: s_lshr_b32 s35, s17, 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s15, s15, 3 ; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s14, s14, 3 ; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; 
SI-NEXT: v_mov_b32_e32 v7, s26 -; SI-NEXT: v_mov_b32_e32 v8, s24 -; SI-NEXT: v_mov_b32_e32 v9, s22 -; SI-NEXT: v_mov_b32_e32 v10, s20 -; SI-NEXT: v_mov_b32_e32 v11, s18 -; SI-NEXT: v_mov_b32_e32 v12, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s27, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s25, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s23, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s21, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s19, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s17, v12, 16 -; SI-NEXT: s_lshr_b32 s40, s6, 16 -; SI-NEXT: s_lshr_b32 s41, s8, 16 -; SI-NEXT: s_lshr_b32 s42, s10, 16 -; SI-NEXT: s_lshr_b32 s43, s12, 16 -; SI-NEXT: s_lshr_b32 s44, s14, 16 -; SI-NEXT: s_lshr_b32 s45, s29, 16 -; SI-NEXT: s_lshr_b32 s46, s27, 16 -; SI-NEXT: s_lshr_b32 s47, s25, 16 -; SI-NEXT: s_lshr_b32 s56, s23, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 16 -; SI-NEXT: s_lshr_b32 s58, s19, 16 -; SI-NEXT: s_lshr_b32 s59, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b32 s88, s5, 16 +; SI-NEXT: s_lshr_b32 s89, s7, 16 +; SI-NEXT: s_lshr_b32 s90, s9, 16 +; SI-NEXT: s_lshr_b32 s91, s11, 16 +; SI-NEXT: s_lshr_b32 s92, s13, 16 +; SI-NEXT: s_lshr_b32 s93, s29, 16 +; SI-NEXT: s_lshr_b32 s94, s27, 16 +; SI-NEXT: s_lshr_b32 s95, s25, 16 +; SI-NEXT: s_lshr_b32 s30, s23, 16 +; SI-NEXT: s_lshr_b32 s31, s21, 16 +; SI-NEXT: s_lshr_b32 s34, s19, 16 +; SI-NEXT: s_lshr_b32 
s35, s17, 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: buffer_store_dword v12, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: s_lshl_b32 s15, s76, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_mov_b32_e32 v1, s15 +; SI-NEXT: s_and_b32 s15, s17, 0xffff +; SI-NEXT: s_lshl_b32 s16, s35, 16 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_lshl_b32 s15, s74, 16 +; SI-NEXT: s_and_b32 s16, s18, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_mov_b32_e32 v3, s15 +; SI-NEXT: s_and_b32 s15, s19, 0xffff +; SI-NEXT: s_lshl_b32 s16, s34, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: 
buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s20, 0xffff +; SI-NEXT: s_lshl_b32 s16, s72, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s21, 0xffff +; SI-NEXT: s_lshl_b32 s16, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s22, 0xffff +; SI-NEXT: s_lshl_b32 s16, s62, 16 +; SI-NEXT: v_add_i32_e32 
v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s23, 0xffff +; SI-NEXT: s_lshl_b32 s16, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s24, 0xffff +; SI-NEXT: s_lshl_b32 s16, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: 
v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s25, 0xffff +; SI-NEXT: s_lshl_b32 s16, s95, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s26, 0xffff +; SI-NEXT: s_lshl_b32 s16, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s27, 0xffff +; SI-NEXT: s_lshl_b32 s16, s94, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 -; 
SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s28, 0xffff +; SI-NEXT: s_lshl_b32 s16, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s29, 0xffff +; SI-NEXT: s_lshl_b32 s16, s93, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s15, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s12, s12, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s91, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; 
SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s89, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x5c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s35, v12, 3 +; SI-NEXT: v_readlane_b32 s34, v12, 2 +; SI-NEXT: v_readlane_b32 s31, v12, 1 +; 
SI-NEXT: v_readlane_b32 s30, v12, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr35 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr31 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr94 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr93 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr92 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr90 ; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr88 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v24i32_to_v48i16_scalar: @@ -12561,180 +12573,186 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11 -; SI-NEXT: v_mov_b32_e32 v26, s16 +; SI-NEXT: v_mov_b32_e32 v23, s16 ; SI-NEXT: v_mov_b32_e32 v24, s17 -; SI-NEXT: v_mov_b32_e32 v23, s18 +; SI-NEXT: v_mov_b32_e32 v21, s18 ; SI-NEXT: v_mov_b32_e32 v22, s19 -; SI-NEXT: v_mov_b32_e32 v20, s20 -; SI-NEXT: v_mov_b32_e32 v19, s21 -; SI-NEXT: v_mov_b32_e32 v18, s22 -; SI-NEXT: v_mov_b32_e32 v15, s23 -; SI-NEXT: v_mov_b32_e32 v17, s24 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v19, s20 +; SI-NEXT: v_mov_b32_e32 v20, s21 +; SI-NEXT: v_mov_b32_e32 v17, s22 +; SI-NEXT: v_mov_b32_e32 v18, s23 +; SI-NEXT: v_mov_b32_e32 v15, s24 ; SI-NEXT: v_mov_b32_e32 v16, s25 -; SI-NEXT: v_mov_b32_e32 v14, s26 -; SI-NEXT: v_mov_b32_e32 v13, s27 -; SI-NEXT: v_mov_b32_e32 v12, s28 -; SI-NEXT: v_mov_b32_e32 v11, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s26 +; SI-NEXT: v_mov_b32_e32 v14, s27 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v11, s28 +; SI-NEXT: v_mov_b32_e32 v12, s29 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v21, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v25, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v28, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v29, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v31, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v34, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v36, v16, v17, 16 -; SI-NEXT: v_alignbit_b32 v38, v15, v18, 16 -; SI-NEXT: v_alignbit_b32 v48, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v51, v22, v23, 16 -; SI-NEXT: v_alignbit_b32 v53, v24, v26, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v10 -; SI-NEXT: 
v_lshrrev_b32_e32 v32, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v24 +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[21:22], 16 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v24 +; SI-NEXT: v_lshr_b64 v[30:31], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[23:24], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 -; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; 
SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 ; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[11:12], 16 ; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 ; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 ; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 ; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_alignbit_b32 v21, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v25, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v28, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v29, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v31, v11, v12, 16 -; SI-NEXT: v_alignbit_b32 v34, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v36, v16, v17, 16 -; SI-NEXT: v_alignbit_b32 v38, v15, v18, 16 -; SI-NEXT: v_alignbit_b32 v48, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v51, v22, v23, 16 -; SI-NEXT: v_alignbit_b32 v53, v24, v26, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v19 -; SI-NEXT: 
v_lshrrev_b32_e32 v55, 16, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v24 +; SI-NEXT: v_lshr_b64 v[26:27], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[27:28], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[28:29], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[19:20], 16 +; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 +; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_lshr_b64 v[29:30], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v24 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v53 -; SI-NEXT: v_or_b32_e32 v26, v26, v53 -; SI-NEXT: buffer_store_dword v26, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v40 -; SI-NEXT: v_or_b32_e32 v24, v24, 
v26 -; SI-NEXT: v_add_i32_e32 v26, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v24, v26, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v37 ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v23, v23, v31 +; SI-NEXT: buffer_store_dword v23, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v51 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v42 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 8, v0 +; SI-NEXT: v_add_i32_e32 v24, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v55 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v36 +; SI-NEXT: v_or_b32_e32 v21, v21, v23 +; SI-NEXT: v_add_i32_e32 v23, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v48 -; SI-NEXT: v_or_b32_e32 v20, v20, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v41 +; SI-NEXT: v_or_b32_e32 v21, v21, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v35 +; SI-NEXT: v_or_b32_e32 v19, v19, v21 +; SI-NEXT: v_add_i32_e32 v21, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v40 ; 
SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v38 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v34 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v52 -; SI-NEXT: v_or_b32_e32 v15, v15, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v55 +; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v15, v18, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v54 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v34 -; SI-NEXT: v_or_b32_e32 v14, v14, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v30 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 40, v0 -; SI-NEXT: buffer_store_dword 
v14, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v49 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v53 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v31 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v32 +; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v39 +; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v52 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen @@ -12746,7 +12764,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -12758,7 +12776,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; 
SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -12770,62 +12788,64 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v25 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8 ; 4-byte 
Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr53 ; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr50 -; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr26 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v24f32_to_v48i16_scalar: @@ -21128,289 +21148,301 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v12i64_to_v48i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v12, s30, 0 +; SI-NEXT: v_writelane_b32 v12, s31, 1 ; SI-NEXT: 
v_cmp_ne_u32_e32 vcc, 0, v11 -; SI-NEXT: v_readfirstlane_b32 s15, v1 -; SI-NEXT: v_readfirstlane_b32 s14, v2 -; SI-NEXT: v_readfirstlane_b32 s13, v3 -; SI-NEXT: v_readfirstlane_b32 s12, v4 -; SI-NEXT: v_readfirstlane_b32 s11, v5 -; SI-NEXT: v_readfirstlane_b32 s10, v6 -; SI-NEXT: v_readfirstlane_b32 s9, v7 -; SI-NEXT: v_readfirstlane_b32 s8, v8 -; SI-NEXT: v_readfirstlane_b32 s7, v9 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v10 +; SI-NEXT: v_writelane_b32 v12, s34, 2 +; SI-NEXT: v_readfirstlane_b32 s12, v1 +; SI-NEXT: v_readfirstlane_b32 s13, v2 +; SI-NEXT: v_readfirstlane_b32 s10, v3 +; SI-NEXT: v_readfirstlane_b32 s11, v4 +; SI-NEXT: v_readfirstlane_b32 s8, v5 +; SI-NEXT: v_readfirstlane_b32 s9, v6 +; SI-NEXT: v_readfirstlane_b32 s6, v7 +; SI-NEXT: v_readfirstlane_b32 s7, v8 +; SI-NEXT: v_readfirstlane_b32 s4, v9 +; SI-NEXT: s_and_b64 s[14:15], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v10 +; SI-NEXT: v_writelane_b32 v12, s35, 3 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v7, s26 -; SI-NEXT: v_mov_b32_e32 v8, s24 -; SI-NEXT: v_mov_b32_e32 v9, s22 -; SI-NEXT: v_mov_b32_e32 v10, s20 -; SI-NEXT: v_mov_b32_e32 v11, s18 -; SI-NEXT: v_mov_b32_e32 v12, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s27, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s25, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s23, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s21, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s19, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s17, v12, 16 -; SI-NEXT: 
s_lshr_b32 s40, s6, 16 -; SI-NEXT: s_lshr_b32 s41, s8, 16 -; SI-NEXT: s_lshr_b32 s42, s10, 16 -; SI-NEXT: s_lshr_b32 s43, s12, 16 -; SI-NEXT: s_lshr_b32 s44, s14, 16 -; SI-NEXT: s_lshr_b32 s45, s29, 16 -; SI-NEXT: s_lshr_b32 s46, s27, 16 -; SI-NEXT: s_lshr_b32 s47, s25, 16 -; SI-NEXT: s_lshr_b32 s56, s23, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 16 -; SI-NEXT: s_lshr_b32 s58, s19, 16 -; SI-NEXT: s_lshr_b32 s59, s17, 16 +; SI-NEXT: s_lshr_b32 s88, s5, 16 +; SI-NEXT: s_lshr_b32 s89, s7, 16 +; SI-NEXT: s_lshr_b32 s90, s9, 16 +; SI-NEXT: s_lshr_b32 s91, s11, 16 +; SI-NEXT: s_lshr_b32 s92, s13, 16 +; SI-NEXT: s_lshr_b32 s93, s29, 16 +; SI-NEXT: s_lshr_b32 s94, s27, 16 +; SI-NEXT: s_lshr_b32 s95, s25, 16 +; SI-NEXT: s_lshr_b32 s30, s23, 16 +; SI-NEXT: s_lshr_b32 s31, s21, 16 +; SI-NEXT: s_lshr_b32 s34, s19, 16 +; SI-NEXT: s_lshr_b32 s35, s17, 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 
s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s28 -; SI-NEXT: v_mov_b32_e32 v7, s26 -; SI-NEXT: v_mov_b32_e32 v8, s24 -; SI-NEXT: v_mov_b32_e32 v9, s22 -; SI-NEXT: v_mov_b32_e32 v10, s20 -; SI-NEXT: v_mov_b32_e32 v11, s18 -; SI-NEXT: v_mov_b32_e32 v12, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s29, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s27, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s25, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s23, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s21, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s19, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s17, v12, 16 -; SI-NEXT: s_lshr_b32 s40, s6, 16 -; SI-NEXT: s_lshr_b32 s41, s8, 16 -; SI-NEXT: s_lshr_b32 s42, s10, 16 -; SI-NEXT: s_lshr_b32 s43, s12, 16 -; SI-NEXT: s_lshr_b32 s44, s14, 16 -; SI-NEXT: s_lshr_b32 s45, s29, 16 -; SI-NEXT: s_lshr_b32 s46, s27, 16 -; SI-NEXT: s_lshr_b32 s47, s25, 16 -; SI-NEXT: s_lshr_b32 s56, s23, 16 -; SI-NEXT: s_lshr_b32 s57, s21, 16 -; SI-NEXT: s_lshr_b32 s58, s19, 16 -; SI-NEXT: s_lshr_b32 s59, s17, 16 +; SI-NEXT: s_add_u32 
s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s88, s5, 16 +; SI-NEXT: s_lshr_b32 s89, s7, 16 +; SI-NEXT: s_lshr_b32 s90, s9, 16 +; SI-NEXT: s_lshr_b32 s91, s11, 16 +; SI-NEXT: s_lshr_b32 s92, s13, 16 +; SI-NEXT: s_lshr_b32 s93, s29, 16 +; SI-NEXT: s_lshr_b32 s94, s27, 16 +; SI-NEXT: s_lshr_b32 s95, s25, 16 +; SI-NEXT: s_lshr_b32 s30, s23, 16 +; SI-NEXT: s_lshr_b32 s31, s21, 16 +; SI-NEXT: s_lshr_b32 s34, s19, 16 +; SI-NEXT: s_lshr_b32 s35, s17, 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; SI-NEXT: buffer_store_dword v12, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: 
buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: s_lshl_b32 s15, s76, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_mov_b32_e32 v1, s15 +; SI-NEXT: s_and_b32 s15, s17, 0xffff +; SI-NEXT: s_lshl_b32 s16, s35, 16 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_lshl_b32 s15, s74, 16 +; SI-NEXT: s_and_b32 s16, s18, 0xffff +; SI-NEXT: s_or_b32 s15, s16, s15 +; SI-NEXT: v_mov_b32_e32 v3, s15 +; SI-NEXT: s_and_b32 s15, s19, 0xffff +; SI-NEXT: s_lshl_b32 s16, s34, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s20, 0xffff +; SI-NEXT: s_lshl_b32 s16, s72, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: 
s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s21, 0xffff +; SI-NEXT: s_lshl_b32 s16, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s22, 0xffff +; SI-NEXT: s_lshl_b32 s16, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s23, 0xffff +; SI-NEXT: s_lshl_b32 s16, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 
offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s24, 0xffff +; SI-NEXT: s_lshl_b32 s16, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s25, 0xffff +; SI-NEXT: s_lshl_b32 s16, s95, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s26, 0xffff +; 
SI-NEXT: s_lshl_b32 s16, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s27, 0xffff +; SI-NEXT: s_lshl_b32 s16, s94, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s41, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s28, 0xffff +; SI-NEXT: s_lshl_b32 s16, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s40, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s15, s29, 0xffff +; SI-NEXT: s_lshl_b32 s16, s93, 16 
+; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s15, s15, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s15 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s15, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s12, s12, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s91, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s90, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: 
buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s89, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s14, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x5c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s35, v12, 3 +; SI-NEXT: v_readlane_b32 s34, v12, 2 +; SI-NEXT: v_readlane_b32 s31, v12, 1 +; SI-NEXT: v_readlane_b32 s30, v12, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr35 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr31 +; SI-NEXT: ; implicit-def: $sgpr62 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr60 +; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: 
$vgpr10 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr94 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr93 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr92 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr91 ; SI-NEXT: ; implicit-def: $sgpr42 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr41 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr90 ; SI-NEXT: ; implicit-def: $sgpr40 +; SI-NEXT: ; implicit-def: $sgpr89 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr88 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v12i64_to_v48i16_scalar: @@ -28931,153 +28963,159 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mov_b32_e32 v11, s28 ; SI-NEXT: v_mov_b32_e32 v12, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v25, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v26, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v28, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v29, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v31, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v35, v16, v15, 16 -; 
SI-NEXT: v_alignbit_b32 v38, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v48, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v50, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v53, v24, v23, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v24 +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[26:27], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[21:22], 16 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v24 +; SI-NEXT: v_lshr_b64 v[30:31], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[23:24], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: 
v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 ; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 ; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: v_alignbit_b32 v25, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v26, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v27, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v28, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v29, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v31, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v33, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v35, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v38, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v48, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v50, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v53, v24, v23, 16 -; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v22 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v24 +; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 +; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_lshr_b64 v[25:26], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[11:12], 16 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_lshr_b64 v[26:27], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 
+; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_lshr_b64 v[27:28], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 +; SI-NEXT: v_lshr_b64 v[28:29], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v2 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v20 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v24 ; SI-NEXT: .LBB49_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v37 ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v53 -; SI-NEXT: v_or_b32_e32 v23, v23, v53 +; SI-NEXT: v_or_b32_e32 v23, v23, v31 ; SI-NEXT: buffer_store_dword v23, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v40 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v42 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v36 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 8, v0 ; SI-NEXT: 
buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v41 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v35 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v40 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v34 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v55 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v54 ; SI-NEXT: v_or_b32_e32 
v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v30 ; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v53 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v32 ; SI-NEXT: v_or_b32_e32 v11, v11, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v11, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v52 ; SI-NEXT: v_or_b32_e32 v11, v11, v12 ; SI-NEXT: v_add_i32_e32 v12, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen @@ -29089,7 +29127,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v11, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -29101,7 +29139,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 
v2, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -29113,7 +29151,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -29125,7 +29163,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -29137,38 +29175,40 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; 
implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr40 -; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr54 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr53 ; SI-NEXT: ; implicit-def: $vgpr52 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr31 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr25 -; SI-NEXT: ; implicit-def: $vgpr30 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v12f64_to_v48i16_scalar: @@ -39277,428 +39317,464 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i ; SI-LABEL: bitcast_v48f16_to_v48i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: 
buffer_load_dword v40, off, s[0:3], s32 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:8 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:12 +; SI-NEXT: 
v_cvt_f16_f32_e32 v61, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v3 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:16 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v53, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v51, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v40, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v62, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v42, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v59, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v58, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v57, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v46, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v26, s28 +; SI-NEXT: 
v_cvt_f16_f32_e32 v22, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v25 ; SI-NEXT: v_cvt_f16_f32_e32 v27, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v30, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v23, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v29, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v18, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v17, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v16, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v28, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v54, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v52, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v50, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v29, s29 ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v40 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f16_f32_e32 v26, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v32 ; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v42 +; SI-NEXT: v_cvt_f16_f32_e32 v43, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v20 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v43 -; SI-NEXT: v_cvt_f16_f32_e32 v40, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v35 ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v20, s22 
; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v30 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v53 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 -; SI-NEXT: v_add_f32_e32 v53, 0x38000000, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v53 -; SI-NEXT: v_add_f32_e32 v53, 0x38000000, v55 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 -; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v51 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v54 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v52 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v29 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: 
v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v50 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v38 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v57 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v59 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v62 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v59, v3, v19 +; SI-NEXT: v_add_f32_e32 
v3, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_or_b32_e32 v62, v1, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v55 +; SI-NEXT: v_or_b32_e32 v57, v3, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v44 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 +; SI-NEXT: v_or_b32_e32 v1, v1, v21 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_or_b32_e32 v35, v5, v17 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v11 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v3, v3, v13 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v3, v34 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v53 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_or_b32_e32 v33, v5, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v32 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_or_b32_e32 v32, v1, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v30 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v5 
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_or_b32_e32 v29, v3, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v39 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_or_b32_e32 v28, v26, v5 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v31 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_or_b32_e32 v27, v1, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v43 ; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v43, v25 +; SI-NEXT: v_or_b32_e32 v44, v26, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v41 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v43 +; SI-NEXT: v_or_b32_e32 v2, v2, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v40 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v41, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v40, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v41 +; SI-NEXT: v_or_b32_e32 v4, v4, v25 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v26 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v40 +; SI-NEXT: v_or_b32_e32 v6, v6, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v56 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v45 +; SI-NEXT: v_or_b32_e32 v8, v8, v26 +; SI-NEXT: 
v_cvt_f32_f16_e32 v26, v47 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v60 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v25 ; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_or_b32_e32 v2, v2, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v26 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v56 +; SI-NEXT: v_or_b32_e32 v10, v10, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v47 +; SI-NEXT: v_or_b32_e32 v12, v12, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v63 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v60 +; SI-NEXT: v_or_b32_e32 v14, v14, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_or_b32_e32 v5, v5, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_or_b32_e32 
v11, v11, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v61 ; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_or_b32_e32 v10, v10, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_or_b32_e32 v14, v14, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v25 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_or_b32_e32 v35, v35, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v33 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v26 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_or_b32_e32 v34, v34, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v25 ; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; 
SI-NEXT: v_lshlrev_b32_e32 v25, 16, v63 +; SI-NEXT: v_or_b32_e32 v18, v18, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v61 +; SI-NEXT: v_or_b32_e32 v22, v22, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v58 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v46 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v25 +; SI-NEXT: v_cvt_f32_f16_e32 v25, v42 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v26 ; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v48 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_or_b32_e32 v38, v38, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 ; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_or_b32_e32 v49, v49, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v17 -; SI-NEXT: v_or_b32_e32 v18, v18, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v19 -; SI-NEXT: v_or_b32_e32 v23, v23, v50 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v20 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v41 -; SI-NEXT: 
v_lshlrev_b32_e32 v28, 16, v28 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v40 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v55 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v53 -; SI-NEXT: v_or_b32_e32 v22, v22, v50 -; SI-NEXT: v_or_b32_e32 v25, v25, v30 -; SI-NEXT: v_or_b32_e32 v24, v24, v29 -; SI-NEXT: v_or_b32_e32 v21, v21, v41 -; SI-NEXT: v_or_b32_e32 v16, v16, v28 -; SI-NEXT: v_or_b32_e32 v48, v48, v54 -; SI-NEXT: v_or_b32_e32 v39, v39, v42 -; SI-NEXT: v_or_b32_e32 v32, v32, v52 -; SI-NEXT: v_or_b32_e32 v31, v31, v51 -; SI-NEXT: v_or_b32_e32 v15, v15, v43 -; SI-NEXT: v_or_b32_e32 v8, v8, v27 -; SI-NEXT: v_or_b32_e32 v7, v7, v26 -; SI-NEXT: v_or_b32_e32 v6, v6, v44 -; SI-NEXT: v_alignbit_b32 v40, v22, v30, 16 -; SI-NEXT: v_alignbit_b32 v30, v23, v29, 16 -; SI-NEXT: v_alignbit_b32 v29, v18, v41, 16 -; SI-NEXT: v_alignbit_b32 v28, v49, v28, 16 -; SI-NEXT: v_alignbit_b32 v55, v38, v54, 16 -; SI-NEXT: v_alignbit_b32 v54, v34, v42, 16 -; SI-NEXT: v_alignbit_b32 v53, v35, v52, 16 -; SI-NEXT: v_alignbit_b32 v52, v14, v51, 16 -; SI-NEXT: v_alignbit_b32 v51, v10, v43, 16 -; SI-NEXT: v_alignbit_b32 v50, v11, v27, 16 -; SI-NEXT: v_alignbit_b32 v27, v5, v26, 16 -; SI-NEXT: v_alignbit_b32 v26, v2, v44, 16 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v25 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_or_b32_e32 v16, v16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v58 +; SI-NEXT: v_lshr_b64 v[50:51], v[15:16], 16 +; SI-NEXT: v_or_b32_e32 v20, v20, v25 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v42 +; SI-NEXT: v_mov_b32_e32 v51, v29 +; SI-NEXT: v_lshr_b64 v[29:30], v[21:22], 16 +; SI-NEXT: v_or_b32_e32 v24, v24, v25 +; SI-NEXT: v_lshr_b64 v[30:31], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[25:26], v[1:2], 16 +; SI-NEXT: v_mov_b32_e32 v31, v44 +; SI-NEXT: buffer_load_dword v44, off, 
s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: v_lshr_b64 v[54:55], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[13:14], 16 +; SI-NEXT: v_mov_b32_e32 v55, v35 +; SI-NEXT: v_mov_b32_e32 v53, v32 +; SI-NEXT: v_mov_b32_e32 v49, v28 +; SI-NEXT: v_mov_b32_e32 v39, v27 +; SI-NEXT: v_lshr_b64 v[36:37], v[11:12], 16 +; SI-NEXT: v_mov_b32_e32 v11, v33 +; SI-NEXT: v_lshr_b64 v[34:35], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[3:4], 16 ; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40 -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; SI-NEXT: v_or_b32_e32 v25, v25, v40 -; SI-NEXT: v_or_b32_e32 v20, v22, v20 -; SI-NEXT: v_add_i32_e32 v22, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v25, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v62 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v30 -; SI-NEXT: v_or_b32_e32 v20, v20, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v42 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 
offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v52 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v59 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v29 -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v17, v18, v17 -; SI-NEXT: v_add_i32_e32 v18, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v58 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v28 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v50 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v57 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v49 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v36 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v46 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt vmcnt(6) expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v29 +; 
SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v55 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v38 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v37 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v55 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v48 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v39 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v54 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v63 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v38 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword 
v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v53 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 -; SI-NEXT: v_add_i32_e32 v17, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: v_or_b32_e32 v12, v16, v12 -; SI-NEXT: v_add_i32_e32 v16, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v12, v16, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v36 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v52 -; SI-NEXT: v_or_b32_e32 v12, v12, v16 -; SI-NEXT: v_add_i32_e32 v16, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v12, v16, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v53 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v15 -; 
SI-NEXT: v_lshlrev_b32_e32 v13, 16, v51 -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v12, v12, v13 -; SI-NEXT: v_add_i32_e32 v13, vcc, 64, v0 -; SI-NEXT: v_or_b32_e32 v9, v10, v9 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v56 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v50 -; SI-NEXT: v_or_b32_e32 v8, v8, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v8, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; SI-NEXT: v_or_b32_e32 v3, v8, v3 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v3, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v27 -; SI-NEXT: v_or_b32_e32 v3, v3, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v45 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x54, v0 -; 
SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v49 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v30 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v26 -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x58, v0 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v40 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v27 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v41 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v31 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v25 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 
offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll index ed67c6b397f11..29005a42d8860 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll +++ 
b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll @@ -3408,313 +3408,333 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v26i32_to_v52i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v14, s30, 0 +; SI-NEXT: v_writelane_b32 v14, s31, 1 +; SI-NEXT: v_writelane_b32 v14, s34, 2 +; SI-NEXT: v_writelane_b32 v14, s35, 3 +; SI-NEXT: v_writelane_b32 v14, s36, 4 +; SI-NEXT: v_writelane_b32 v14, s37, 5 +; SI-NEXT: v_writelane_b32 v14, s38, 6 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 -; SI-NEXT: v_readfirstlane_b32 s41, v1 -; SI-NEXT: v_readfirstlane_b32 s40, v2 -; SI-NEXT: v_readfirstlane_b32 s15, v3 -; SI-NEXT: v_readfirstlane_b32 s14, v4 -; SI-NEXT: v_readfirstlane_b32 s13, v5 -; SI-NEXT: v_readfirstlane_b32 s12, v6 -; SI-NEXT: v_readfirstlane_b32 s11, v7 -; SI-NEXT: v_readfirstlane_b32 s10, v8 -; SI-NEXT: v_readfirstlane_b32 s9, v9 -; SI-NEXT: v_readfirstlane_b32 s8, v10 -; SI-NEXT: v_readfirstlane_b32 s7, v11 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v12 +; SI-NEXT: v_writelane_b32 v14, s39, 7 +; SI-NEXT: v_readfirstlane_b32 s14, v1 +; SI-NEXT: v_readfirstlane_b32 s15, v2 +; SI-NEXT: v_readfirstlane_b32 s12, v3 +; SI-NEXT: v_readfirstlane_b32 s13, v4 +; SI-NEXT: v_readfirstlane_b32 s10, v5 +; SI-NEXT: v_readfirstlane_b32 s11, v6 +; SI-NEXT: v_readfirstlane_b32 s8, v7 +; SI-NEXT: v_readfirstlane_b32 s9, v8 +; SI-NEXT: v_readfirstlane_b32 s6, v9 +; SI-NEXT: v_readfirstlane_b32 s7, v10 +; SI-NEXT: v_readfirstlane_b32 s4, v11 +; SI-NEXT: s_and_b64 s[40:41], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v12 +; SI-NEXT: v_writelane_b32 v14, s48, 8 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; 
SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s28 -; SI-NEXT: v_mov_b32_e32 v8, s26 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v10, s22 -; SI-NEXT: v_mov_b32_e32 v11, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v13, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s29, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s27, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s23, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s21, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s17, v13, 16 -; SI-NEXT: s_lshr_b32 s42, s6, 16 -; SI-NEXT: s_lshr_b32 s43, s8, 16 -; SI-NEXT: s_lshr_b32 s44, s10, 16 -; SI-NEXT: s_lshr_b32 s45, s12, 16 -; SI-NEXT: s_lshr_b32 s46, s14, 16 -; SI-NEXT: s_lshr_b32 s47, s40, 16 -; SI-NEXT: s_lshr_b32 s56, s29, 16 -; SI-NEXT: s_lshr_b32 s57, s27, 16 -; SI-NEXT: s_lshr_b32 s58, s25, 16 -; SI-NEXT: s_lshr_b32 s59, s23, 16 -; SI-NEXT: s_lshr_b32 s60, s21, 16 -; SI-NEXT: s_lshr_b32 s61, s19, 16 -; SI-NEXT: s_lshr_b32 s62, s17, 16 +; SI-NEXT: s_lshr_b32 s92, s5, 16 +; SI-NEXT: s_lshr_b32 s93, s7, 16 +; SI-NEXT: s_lshr_b32 s94, s9, 16 +; SI-NEXT: s_lshr_b32 s95, s11, 16 +; SI-NEXT: s_lshr_b32 s30, s13, 16 +; SI-NEXT: s_lshr_b32 s31, s15, 16 +; SI-NEXT: s_lshr_b32 s34, s29, 16 +; SI-NEXT: s_lshr_b32 s35, s27, 16 +; SI-NEXT: s_lshr_b32 s36, s25, 16 +; SI-NEXT: s_lshr_b32 s37, s23, 16 +; SI-NEXT: s_lshr_b32 s38, s21, 16 +; SI-NEXT: s_lshr_b32 s39, s19, 16 +; SI-NEXT: s_lshr_b32 s48, s17, 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; 
SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s41, s41, 3 ; SI-NEXT: s_add_i32 s15, s15, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s40, s40, 3 ; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: s_add_i32 s13, s13, 3 ; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 
v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s28 -; SI-NEXT: v_mov_b32_e32 v8, s26 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v10, s22 -; SI-NEXT: v_mov_b32_e32 v11, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v13, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s29, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s27, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s23, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s21, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s17, v13, 16 -; SI-NEXT: s_lshr_b32 s42, s6, 16 -; SI-NEXT: s_lshr_b32 s43, s8, 16 -; SI-NEXT: s_lshr_b32 s44, s10, 16 -; SI-NEXT: s_lshr_b32 s45, s12, 16 -; SI-NEXT: s_lshr_b32 s46, s14, 16 -; SI-NEXT: s_lshr_b32 s47, s40, 16 -; SI-NEXT: s_lshr_b32 s56, s29, 16 -; SI-NEXT: s_lshr_b32 s57, s27, 16 -; SI-NEXT: s_lshr_b32 s58, s25, 16 -; SI-NEXT: s_lshr_b32 s59, s23, 16 -; SI-NEXT: s_lshr_b32 s60, s21, 16 -; SI-NEXT: s_lshr_b32 s61, s19, 16 -; SI-NEXT: s_lshr_b32 s62, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[28:29], 16 +; SI-NEXT: s_lshr_b32 s92, s5, 16 +; SI-NEXT: s_lshr_b32 s93, s7, 16 +; SI-NEXT: s_lshr_b32 s94, s9, 16 +; SI-NEXT: s_lshr_b32 s95, s11, 16 +; SI-NEXT: s_lshr_b32 s30, s13, 16 +; SI-NEXT: s_lshr_b32 s31, s15, 16 +; SI-NEXT: s_lshr_b32 s34, s29, 16 +; SI-NEXT: s_lshr_b32 s35, s27, 16 +; SI-NEXT: s_lshr_b32 s36, s25, 16 +; SI-NEXT: 
s_lshr_b32 s37, s23, 16 +; SI-NEXT: s_lshr_b32 s38, s21, 16 +; SI-NEXT: s_lshr_b32 s39, s19, 16 +; SI-NEXT: s_lshr_b32 s48, s17, 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: s_lshl_b32 s41, s88, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s41 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s78, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s39, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s76, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, 
s17, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_lshl_b32 s17, s38, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; 
SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 
v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, 
s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 
0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s95, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s94, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, 
s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s93, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s48, v14, 8 +; SI-NEXT: v_readlane_b32 s39, v14, 7 +; SI-NEXT: v_readlane_b32 s38, v14, 6 +; SI-NEXT: v_readlane_b32 s37, v14, 5 +; SI-NEXT: v_readlane_b32 s36, v14, 4 +; SI-NEXT: v_readlane_b32 s35, v14, 3 +; SI-NEXT: v_readlane_b32 s34, v14, 2 +; SI-NEXT: v_readlane_b32 s31, v14, 1 +; SI-NEXT: v_readlane_b32 s30, v14, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr37 +; SI-NEXT: ; implicit-def: $sgpr72 +; 
SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr34 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr31 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr92 ; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v26i32_to_v52i16_scalar: @@ -13646,211 +13666,217 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 -; SI-NEXT: v_mov_b32_e32 v28, s16 -; SI-NEXT: v_mov_b32_e32 v27, s17 -; SI-NEXT: v_mov_b32_e32 v25, s18 +; SI-NEXT: v_mov_b32_e32 v25, s16 +; SI-NEXT: v_mov_b32_e32 v26, s17 +; SI-NEXT: v_mov_b32_e32 v23, s18 ; SI-NEXT: v_mov_b32_e32 v24, s19 -; SI-NEXT: v_mov_b32_e32 v21, s20 -; SI-NEXT: v_mov_b32_e32 v19, s21 -; SI-NEXT: v_mov_b32_e32 v22, s22 +; SI-NEXT: v_mov_b32_e32 v19, s20 +; SI-NEXT: v_mov_b32_e32 v20, s21 +; SI-NEXT: v_mov_b32_e32 v21, s22 +; SI-NEXT: 
v_mov_b32_e32 v22, s23 +; SI-NEXT: v_mov_b32_e32 v17, s24 +; SI-NEXT: v_mov_b32_e32 v18, s25 +; SI-NEXT: v_mov_b32_e32 v15, s26 +; SI-NEXT: v_mov_b32_e32 v16, s27 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v20, s23 -; SI-NEXT: v_mov_b32_e32 v18, s24 -; SI-NEXT: v_mov_b32_e32 v17, s25 -; SI-NEXT: v_mov_b32_e32 v16, s26 -; SI-NEXT: v_mov_b32_e32 v15, s27 -; SI-NEXT: v_mov_b32_e32 v14, s28 -; SI-NEXT: v_mov_b32_e32 v13, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v13, s28 +; SI-NEXT: v_mov_b32_e32 v14, s29 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v23, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v26, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v30, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v31, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v35, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 
v37, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v48, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v50, v20, v22, 16 -; SI-NEXT: v_alignbit_b32 v52, v19, v21, 16 -; SI-NEXT: v_alignbit_b32 v54, v24, v25, 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[28:29], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v2 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v40, v27, v28, 16 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v18 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v27 +; SI-NEXT: 
v_lshrrev_b32_e32 v47, 16, v26 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[25:26], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_lshr_b64 v[27:28], v[11:12], 16 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: v_lshr_b64 v[28:29], v[9:10], 16 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 +; SI-NEXT: v_lshr_b64 v[29:30], v[7:8], 16 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[30:31], v[5:6], 16 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16 ; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 -; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 ; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_lshr_b64 v[32:33], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 +; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 ; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 ; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 
1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_alignbit_b32 v23, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v26, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v30, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v31, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v33, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v35, v13, v14, 16 -; SI-NEXT: v_alignbit_b32 v37, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v48, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v50, v20, v22, 16 -; SI-NEXT: v_alignbit_b32 v52, v19, v21, 16 -; SI-NEXT: v_alignbit_b32 v54, v24, v25, 16 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[25:26], 16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v2 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v40, v27, v28, 16 -; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v2 -; SI-NEXT: 
v_lshrrev_b32_e32 v51, 16, v13 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v18 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v26 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40 -; SI-NEXT: v_or_b32_e32 v28, v28, v40 -; SI-NEXT: buffer_store_dword v28, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v44 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v49 ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v25, v25, v35 +; SI-NEXT: buffer_store_dword v25, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v54 -; SI-NEXT: v_or_b32_e32 v25, v25, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v25, v27, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v47 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v43 -; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen -; 
SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v48 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v23, v23, v25 +; SI-NEXT: v_add_i32_e32 v25, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v52 -; SI-NEXT: v_or_b32_e32 v21, v21, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v21, v24, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v46 +; SI-NEXT: v_or_b32_e32 v23, v23, v24 +; SI-NEXT: v_add_i32_e32 v24, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v42 -; SI-NEXT: v_or_b32_e32 v19, v19, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v50 -; SI-NEXT: v_or_b32_e32 v19, v19, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v38 +; SI-NEXT: v_or_b32_e32 v19, v19, v23 +; SI-NEXT: v_add_i32_e32 v23, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v19, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v45 +; SI-NEXT: v_or_b32_e32 v19, v19, v20 +; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v37 +; SI-NEXT: v_or_b32_e32 v19, v19, v20 +; SI-NEXT: v_add_i32_e32 v20, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
v_and_b32_e32 v19, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v44 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v34 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v55 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v43 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v37 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v53 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v42 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v35 -; SI-NEXT: v_or_b32_e32 v14, v14, v15 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, 
v36 +; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v51 +; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v41 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v32 ; SI-NEXT: v_or_b32_e32 v1, v1, v13 ; SI-NEXT: v_add_i32_e32 v13, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -13862,7 +13888,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -13874,7 +13900,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 
offen @@ -13886,68 +13912,71 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v28 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:16 ; 
4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr51 -; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr36 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr38 ; 
SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr23 -; SI-NEXT: ; implicit-def: $vgpr32 +; SI-NEXT: ; implicit-def: $vgpr28 +; SI-NEXT: ; implicit-def: $vgpr27 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v26f32_to_v52i16_scalar: @@ -23073,313 +23102,333 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v13i64_to_v52i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v14, s30, 0 +; SI-NEXT: v_writelane_b32 v14, s31, 1 +; SI-NEXT: v_writelane_b32 v14, s34, 2 +; SI-NEXT: v_writelane_b32 v14, s35, 3 +; SI-NEXT: v_writelane_b32 v14, s36, 4 +; SI-NEXT: v_writelane_b32 v14, s37, 5 +; SI-NEXT: v_writelane_b32 v14, s38, 6 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13 -; SI-NEXT: v_readfirstlane_b32 s41, v1 -; SI-NEXT: v_readfirstlane_b32 s40, v2 -; SI-NEXT: v_readfirstlane_b32 s15, v3 -; SI-NEXT: v_readfirstlane_b32 s14, v4 -; SI-NEXT: v_readfirstlane_b32 s13, v5 -; SI-NEXT: v_readfirstlane_b32 s12, v6 -; SI-NEXT: v_readfirstlane_b32 s11, v7 -; SI-NEXT: v_readfirstlane_b32 s10, v8 -; SI-NEXT: v_readfirstlane_b32 s9, v9 -; SI-NEXT: v_readfirstlane_b32 s8, v10 -; SI-NEXT: v_readfirstlane_b32 s7, v11 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v12 +; SI-NEXT: v_writelane_b32 v14, s39, 7 +; SI-NEXT: v_readfirstlane_b32 s14, v1 +; SI-NEXT: v_readfirstlane_b32 s15, v2 +; SI-NEXT: v_readfirstlane_b32 s12, v3 +; SI-NEXT: v_readfirstlane_b32 s13, v4 +; SI-NEXT: v_readfirstlane_b32 s10, v5 +; SI-NEXT: v_readfirstlane_b32 s11, v6 +; SI-NEXT: v_readfirstlane_b32 s8, v7 +; SI-NEXT: v_readfirstlane_b32 s9, v8 +; SI-NEXT: v_readfirstlane_b32 s6, 
v9 +; SI-NEXT: v_readfirstlane_b32 s7, v10 +; SI-NEXT: v_readfirstlane_b32 s4, v11 +; SI-NEXT: s_and_b64 s[40:41], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v12 +; SI-NEXT: v_writelane_b32 v14, s48, 8 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s28 -; SI-NEXT: v_mov_b32_e32 v8, s26 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v10, s22 -; SI-NEXT: v_mov_b32_e32 v11, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v13, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s29, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s27, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s23, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s21, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s17, v13, 16 -; SI-NEXT: s_lshr_b32 s42, s6, 16 -; SI-NEXT: s_lshr_b32 s43, s8, 16 -; SI-NEXT: s_lshr_b32 s44, s10, 16 -; SI-NEXT: s_lshr_b32 s45, s12, 16 -; SI-NEXT: s_lshr_b32 s46, s14, 16 -; SI-NEXT: s_lshr_b32 s47, s40, 16 -; SI-NEXT: s_lshr_b32 s56, s29, 16 -; SI-NEXT: s_lshr_b32 s57, s27, 16 -; SI-NEXT: s_lshr_b32 s58, s25, 16 -; SI-NEXT: s_lshr_b32 s59, s23, 16 -; SI-NEXT: s_lshr_b32 s60, s21, 16 -; SI-NEXT: s_lshr_b32 s61, s19, 16 -; SI-NEXT: s_lshr_b32 s62, s17, 16 +; SI-NEXT: s_lshr_b32 s92, s5, 16 +; SI-NEXT: s_lshr_b32 s93, s7, 16 +; SI-NEXT: s_lshr_b32 s94, s9, 16 +; SI-NEXT: s_lshr_b32 s95, s11, 16 +; SI-NEXT: s_lshr_b32 s30, s13, 16 +; SI-NEXT: s_lshr_b32 s31, s15, 16 +; SI-NEXT: s_lshr_b32 s34, s29, 16 +; 
SI-NEXT: s_lshr_b32 s35, s27, 16 +; SI-NEXT: s_lshr_b32 s36, s25, 16 +; SI-NEXT: s_lshr_b32 s37, s23, 16 +; SI-NEXT: s_lshr_b32 s38, s21, 16 +; SI-NEXT: s_lshr_b32 s39, s19, 16 +; SI-NEXT: s_lshr_b32 s48, s17, 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 +; SI-NEXT: s_add_u32 s14, s14, 3 +; SI-NEXT: s_addc_u32 s15, s15, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s41, s41, 3 -; SI-NEXT: s_addc_u32 s40, s40, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 
-; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s28 -; SI-NEXT: v_mov_b32_e32 v8, s26 -; SI-NEXT: v_mov_b32_e32 v9, s24 -; SI-NEXT: v_mov_b32_e32 v10, s22 -; SI-NEXT: v_mov_b32_e32 v11, s20 -; SI-NEXT: v_mov_b32_e32 v12, s18 -; SI-NEXT: v_mov_b32_e32 v13, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s29, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s27, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s25, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s23, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s21, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s19, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s17, v13, 16 -; SI-NEXT: s_lshr_b32 s42, s6, 16 -; SI-NEXT: s_lshr_b32 s43, s8, 16 -; SI-NEXT: s_lshr_b32 s44, s10, 16 -; SI-NEXT: s_lshr_b32 s45, s12, 16 -; SI-NEXT: s_lshr_b32 s46, s14, 16 -; SI-NEXT: s_lshr_b32 s47, s40, 16 -; SI-NEXT: s_lshr_b32 s56, s29, 16 -; SI-NEXT: s_lshr_b32 s57, s27, 16 -; SI-NEXT: s_lshr_b32 s58, s25, 16 -; SI-NEXT: s_lshr_b32 s59, s23, 16 -; SI-NEXT: s_lshr_b32 s60, s21, 16 -; SI-NEXT: s_lshr_b32 s61, s19, 16 -; SI-NEXT: s_lshr_b32 s62, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: 
s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s92, s5, 16 +; SI-NEXT: s_lshr_b32 s93, s7, 16 +; SI-NEXT: s_lshr_b32 s94, s9, 16 +; SI-NEXT: s_lshr_b32 s95, s11, 16 +; SI-NEXT: s_lshr_b32 s30, s13, 16 +; SI-NEXT: s_lshr_b32 s31, s15, 16 +; SI-NEXT: s_lshr_b32 s34, s29, 16 +; SI-NEXT: s_lshr_b32 s35, s27, 16 +; SI-NEXT: s_lshr_b32 s36, s25, 16 +; SI-NEXT: s_lshr_b32 s37, s23, 16 +; SI-NEXT: s_lshr_b32 s38, s21, 16 +; SI-NEXT: s_lshr_b32 s39, s19, 16 +; SI-NEXT: s_lshr_b32 s48, s17, 16 +; SI-NEXT: s_lshr_b64 s[40:41], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; SI-NEXT: buffer_store_dword v13, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: 
v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: s_lshl_b32 s41, s88, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s41 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s78, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s39, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s76, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_lshl_b32 s17, s38, 16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_waitcnt expcnt(2) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, 
s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; 
SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: 
buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; 
SI-NEXT: s_lshl_b32 s5, s43, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s42, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s95, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: 
s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s94, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s93, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s40, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s92, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s48, v14, 8 +; SI-NEXT: v_readlane_b32 s39, v14, 7 +; SI-NEXT: v_readlane_b32 s38, v14, 6 +; SI-NEXT: v_readlane_b32 s37, v14, 5 +; SI-NEXT: v_readlane_b32 s36, v14, 4 +; SI-NEXT: v_readlane_b32 s35, v14, 3 +; SI-NEXT: 
v_readlane_b32 s34, v14, 2 +; SI-NEXT: v_readlane_b32 s31, v14, 1 +; SI-NEXT: v_readlane_b32 s30, v14, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr48 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr39 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr38 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr37 +; SI-NEXT: ; implicit-def: $sgpr72 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr34 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr31 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr95 ; SI-NEXT: ; implicit-def: $sgpr44 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr43 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr93 +; SI-NEXT: ; implicit-def: $sgpr92 ; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr40 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: 
bitcast_v13i64_to_v52i16_scalar: @@ -31660,171 +31709,177 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mov_b32_e32 v13, s28 ; SI-NEXT: v_mov_b32_e32 v14, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v28, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v30, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v31, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v32, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v34, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v37, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v39, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v52, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v54, v24, v23, 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[11:12], 16 +; SI-NEXT: v_lshr_b64 
v[28:29], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v2 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v40, v26, v25, 16 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v18 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v26 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[25:26], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[25:26], 
v[25:26], 1.0 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 ; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 ; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v28, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v29, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v30, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v31, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v32, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v34, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v37, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v39, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v49, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v52, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v54, v24, v23, 16 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 +; SI-NEXT: v_lshr_b64 v[27:28], v[11:12], 16 +; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_lshr_b64 v[28:29], v[9:10], 16 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_lshr_b64 v[29:30], v[7:8], 16 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: v_lshr_b64 v[30:31], v[5:6], 16 +; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 +; SI-NEXT: v_lshr_b64 v[31:32], v[3:4], 16 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 +; SI-NEXT: v_lshr_b64 v[32:33], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[21:22], 16 +; SI-NEXT: v_lshr_b64 
v[48:49], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[25:26], 16 +; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v2 +; SI-NEXT: s_waitcnt expcnt(6) +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v16 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v40, v26, v25, 16 -; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v18 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v18 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v26 ; SI-NEXT: .LBB49_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v49 ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40 -; SI-NEXT: v_or_b32_e32 v25, v25, v40 +; SI-NEXT: v_or_b32_e32 v25, v25, v35 ; SI-NEXT: buffer_store_dword v25, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v44 +; SI-NEXT: 
v_lshlrev_b32_e32 v26, 16, v47 ; SI-NEXT: v_or_b32_e32 v25, v25, v26 ; SI-NEXT: v_add_i32_e32 v26, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v48 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_add_i32_e32 v25, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v46 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v38 ; SI-NEXT: v_or_b32_e32 v19, v19, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v19, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v45 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v21 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v37 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v44 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: 
v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v34 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v43 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v33 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v42 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v34 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v36 ; SI-NEXT: v_or_b32_e32 v13, v13, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v13, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v13, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v41 ; SI-NEXT: v_or_b32_e32 v13, v13, v14 ; SI-NEXT: v_add_i32_e32 v14, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen @@ -31836,7 +31891,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v13, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; 
SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -31848,7 +31903,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -31860,7 +31915,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -31872,7 +31927,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -31884,7 +31939,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ 
-31896,44 +31951,47 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr47 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr45 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; 
implicit-def: $vgpr34 ; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr37 +; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr53 -; SI-NEXT: ; implicit-def: $vgpr34 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr28 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr33 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v13f64_to_v52i16_scalar: @@ -43234,482 +43292,533 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i ; SI-LABEL: bitcast_v52f16_to_v52i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; 
SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:4 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; 
SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:24 ; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:12 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:24 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v3 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:28 +; SI-NEXT: v_cvt_f16_f32_e32 v62, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v10 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:32 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v23 -; SI-NEXT: v_cvt_f16_f32_e32 
v52, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v27, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v23, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v18, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s28 -; SI-NEXT: v_cvt_f16_f32_e32 v30, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v14 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v18 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v44, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v51, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v40, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v3, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v46, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v45, s23 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v61, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v11, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v25 +; 
SI-NEXT: v_cvt_f16_f32_e32 v6, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v43, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v41, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v54, s29 +; SI-NEXT: s_waitcnt vmcnt(10) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 +; SI-NEXT: s_waitcnt vmcnt(9) +; SI-NEXT: v_cvt_f16_f32_e32 v53, v32 ; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v42 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v33 ; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_cvt_f16_f32_e32 v29, v43 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 ; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v44 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v35, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v28 ; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v36 ; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v4, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v38 ; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f16_f32_e32 v28, v47 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v5, v56 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v57 -; SI-NEXT: v_cvt_f16_f32_e32 v44, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v43, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v42, s25 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v58 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v39 +; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v20, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v39, s25 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v44, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v43 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 -; SI-NEXT: v_add_f32_e32 v44, 0x38000000, v44 -; 
SI-NEXT: v_cvt_f16_f32_e32 v44, v44 -; SI-NEXT: v_add_f32_e32 v43, 0x38000000, v43 -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v45, v43 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v42 -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v42 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v40 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v53 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v42 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 -; SI-NEXT: v_add_f32_e32 v53, 0x38000000, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v54 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v53 -; SI-NEXT: v_add_f32_e32 v53, 0x38000000, v40 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 -; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v51 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v43 +; SI-NEXT: v_mov_b32_e32 v38, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v41 +; SI-NEXT: v_mov_b32_e32 v28, v7 +; SI-NEXT: v_add_f32_e32 v1, 
0x38000000, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v54 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_mov_b32_e32 v36, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v39 +; SI-NEXT: v_mov_b32_e32 v9, v15 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v39, v7, v19 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v38 +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v38, v7, v15 +; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v1, v52 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v50 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_mov_b32_e32 v29, v11 +; SI-NEXT: v_or_b32_e32 v5, v5, v23 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:108 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: 
s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v29 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v37 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 ; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v5, v5, v25 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v3, v33 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v35 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 ; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 ; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 ; SI-NEXT: v_cvt_f16_f32_e32 v10, 
v10 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_or_b32_e32 v5, v5, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_or_b32_e32 v3, v3, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v8 -; SI-NEXT: v_or_b32_e32 v9, v9, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v7 -; SI-NEXT: v_or_b32_e32 v14, v14, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v10 -; SI-NEXT: v_or_b32_e32 v12, v12, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v16 -; SI-NEXT: v_or_b32_e32 v17, v17, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v31 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 -; SI-NEXT: v_or_b32_e32 v36, v36, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 ; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 ; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v32 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_or_b32_e32 v34, v34, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v29 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, 
v48 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v49, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 ; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 ; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 ; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 ; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 ; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 -; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v49 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; 
SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_or_b32_e32 v48, v29, v48 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 ; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_or_b32_e32 v50, v50, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v18 -; SI-NEXT: v_or_b32_e32 v19, v19, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v22 -; SI-NEXT: v_or_b32_e32 v25, v25, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v23 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v45 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v44 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v42 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v40 -; SI-NEXT: v_or_b32_e32 v24, v24, v29 -; SI-NEXT: v_or_b32_e32 v27, v27, v43 -; SI-NEXT: v_or_b32_e32 v26, v26, v45 -; SI-NEXT: v_or_b32_e32 v21, v21, v30 -; SI-NEXT: v_or_b32_e32 v20, v20, v41 -; SI-NEXT: v_or_b32_e32 v49, v49, v46 -; SI-NEXT: v_or_b32_e32 v37, v37, v55 -; SI-NEXT: v_or_b32_e32 v35, v35, v54 -; SI-NEXT: v_or_b32_e32 v33, v33, v47 -; SI-NEXT: v_or_b32_e32 v15, v15, v52 -; SI-NEXT: v_or_b32_e32 v13, v13, v51 -; SI-NEXT: v_or_b32_e32 v11, v11, v56 -; SI-NEXT: v_or_b32_e32 v6, v6, v28 -; SI-NEXT: v_or_b32_e32 v4, v4, v57 -; SI-NEXT: v_alignbit_b32 v44, v24, v43, 16 -; SI-NEXT: v_alignbit_b32 v43, v25, v45, 16 -; SI-NEXT: v_alignbit_b32 v42, v19, v30, 16 -; SI-NEXT: 
v_alignbit_b32 v30, v50, v41, 16 -; SI-NEXT: v_alignbit_b32 v41, v48, v46, 16 -; SI-NEXT: v_alignbit_b32 v40, v34, v55, 16 -; SI-NEXT: v_alignbit_b32 v55, v36, v54, 16 -; SI-NEXT: v_alignbit_b32 v54, v17, v47, 16 -; SI-NEXT: v_alignbit_b32 v53, v12, v52, 16 -; SI-NEXT: v_alignbit_b32 v52, v14, v51, 16 -; SI-NEXT: v_alignbit_b32 v51, v9, v56, 16 -; SI-NEXT: v_alignbit_b32 v29, v3, v28, 16 -; SI-NEXT: v_alignbit_b32 v28, v5, v57, 16 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v28 +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_or_b32_e32 v7, v7, v17 +; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:100 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v44 +; SI-NEXT: v_or_b32_e32 v5, v5, v21 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:104 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v56 +; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v9 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v42 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v5, v5, v11 +; SI-NEXT: v_or_b32_e32 v56, v7, v13 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v31 
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v40 +; SI-NEXT: v_or_b32_e32 v36, v1, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v55 +; SI-NEXT: v_or_b32_e32 v37, v28, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v53 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v35, v3, v5 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v34 +; SI-NEXT: v_or_b32_e32 v33, v28, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v30 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v27 +; SI-NEXT: v_or_b32_e32 v31, v29, v1 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v29, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v30 +; SI-NEXT: v_or_b32_e32 v2, v2, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v57 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29 +; SI-NEXT: v_or_b32_e32 v4, v4, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v51 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v60 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v57 +; SI-NEXT: v_or_b32_e32 v6, v6, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v32 +; SI-NEXT: v_or_b32_e32 v8, v8, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v47 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v60 +; SI-NEXT: v_or_b32_e32 v10, v10, v28 +; SI-NEXT: 
v_cvt_f32_f16_e32 v28, v63 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v59 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v47 +; SI-NEXT: v_or_b32_e32 v12, v12, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v63 +; SI-NEXT: v_or_b32_e32 v14, v14, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v62 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v59 +; SI-NEXT: v_or_b32_e32 v18, v18, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v58 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v62, v27 +; SI-NEXT: v_lshr_b64 v[50:51], v[17:18], 16 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v62 +; SI-NEXT: v_or_b32_e32 v22, v22, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v61 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v58 +; SI-NEXT: v_or_b32_e32 v26, v26, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v45 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v28 +; SI-NEXT: v_lshr_b64 v[54:55], v[25:26], 16 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v45, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v46 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v61 +; SI-NEXT: v_lshr_b64 v[52:53], v[21:22], 16 +; SI-NEXT: v_or_b32_e32 v16, v16, v28 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v45 +; SI-NEXT: v_or_b32_e32 v20, v20, v27 +; SI-NEXT: v_mov_b32_e32 v53, v33 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v46 +; SI-NEXT: v_or_b32_e32 v24, v24, v27 +; SI-NEXT: v_lshr_b64 v[43:44], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[7:8], 16 +; SI-NEXT: v_mov_b32_e32 v7, v56 +; SI-NEXT: v_lshr_b64 v[55:56], v[3:4], 16 +; SI-NEXT: v_mov_b32_e32 v44, v37 +; SI-NEXT: 
v_lshr_b64 v[41:42], v[19:20], 16 +; SI-NEXT: v_mov_b32_e32 v19, v39 +; SI-NEXT: v_lshr_b64 v[39:40], v[15:16], 16 +; SI-NEXT: v_mov_b32_e32 v15, v38 +; SI-NEXT: v_lshr_b64 v[37:38], v[11:12], 16 +; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v42, v36 +; SI-NEXT: v_mov_b32_e32 v40, v35 +; SI-NEXT: v_mov_b32_e32 v51, v32 +; SI-NEXT: v_lshr_b64 v[48:49], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[9:10], 16 +; SI-NEXT: v_mov_b32_e32 v34, v31 +; SI-NEXT: v_lshr_b64 v[31:32], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[1:2], 16 +; SI-NEXT: v_mov_b32_e32 v32, v29 ; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v44 -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; SI-NEXT: v_or_b32_e32 v27, v27, v44 -; SI-NEXT: v_or_b32_e32 v23, v24, v23 -; SI-NEXT: v_add_i32_e32 v24, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v43 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v43 -; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v46 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v25 -; 
SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22 -; SI-NEXT: v_or_b32_e32 v22, v23, v22 -; SI-NEXT: v_add_i32_e32 v23, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v41 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v19 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v42 -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; SI-NEXT: v_or_b32_e32 v21, v21, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v18, v19, v18 -; SI-NEXT: v_add_i32_e32 v19, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v30 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v38 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v49 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v41 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v45 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v39 -; SI-NEXT: 
v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v39 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v15 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v40 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v32 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v35 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v55 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v31 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v54 -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 -; SI-NEXT: v_or_b32_e32 v16, v17, v16 -; SI-NEXT: v_add_i32_e32 v17, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v18, v19, 
s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v11 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v53 -; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; SI-NEXT: v_or_b32_e32 v15, v15, v16 -; SI-NEXT: v_add_i32_e32 v16, vcc, 64, v0 -; SI-NEXT: v_or_b32_e32 v10, v12, v10 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v58 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v52 -; SI-NEXT: v_or_b32_e32 v10, v10, v12 -; SI-NEXT: v_add_i32_e32 v12, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v10, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; SI-NEXT: v_or_b32_e32 v7, v10, v7 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v62 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; 
SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v51 -; SI-NEXT: v_or_b32_e32 v7, v7, v10 -; SI-NEXT: v_add_i32_e32 v10, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v50 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v7, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; SI-NEXT: v_or_b32_e32 v7, v7, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v29 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: v_or_b32_e32 v6, v6, v7 -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x58, v0 -; SI-NEXT: v_or_b32_e32 v2, v3, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v7 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v48 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v63 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v56 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 +; 
SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v35 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v42 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v33 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v51 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v40 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v31 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v57 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v53 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; 
SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v28 -; SI-NEXT: v_or_b32_e32 v2, v2, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v27 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v2, 0xffff, v5 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v2, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 +; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, 
s[0:3], s32 offset:68 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:92 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB59_4: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll index 099642646f8cc..8ee5b966f40b8 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll @@ -3637,337 +3637,368 @@ define inreg <56 x i16> 
@bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v28i32_to_v56i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v16, s30, 0 +; SI-NEXT: v_writelane_b32 v16, s31, 1 +; SI-NEXT: v_writelane_b32 v16, s34, 2 +; SI-NEXT: v_writelane_b32 v16, s35, 3 +; SI-NEXT: v_writelane_b32 v16, s36, 4 +; SI-NEXT: v_writelane_b32 v16, s37, 5 +; SI-NEXT: v_writelane_b32 v16, s38, 6 +; SI-NEXT: v_writelane_b32 v16, s39, 7 +; SI-NEXT: v_writelane_b32 v16, s48, 8 +; SI-NEXT: v_writelane_b32 v16, s49, 9 +; SI-NEXT: v_writelane_b32 v16, s50, 10 +; SI-NEXT: v_writelane_b32 v16, s51, 11 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15 -; SI-NEXT: v_readfirstlane_b32 s43, v1 -; SI-NEXT: v_readfirstlane_b32 s42, v2 -; SI-NEXT: v_readfirstlane_b32 s41, v3 -; SI-NEXT: v_readfirstlane_b32 s40, v4 -; SI-NEXT: v_readfirstlane_b32 s15, v5 -; SI-NEXT: v_readfirstlane_b32 s14, v6 -; SI-NEXT: v_readfirstlane_b32 s13, v7 -; SI-NEXT: v_readfirstlane_b32 s12, v8 -; SI-NEXT: v_readfirstlane_b32 s11, v9 -; SI-NEXT: v_readfirstlane_b32 s10, v10 -; SI-NEXT: v_readfirstlane_b32 s9, v11 -; SI-NEXT: v_readfirstlane_b32 s8, v12 -; SI-NEXT: v_readfirstlane_b32 s7, v13 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v14 +; SI-NEXT: v_writelane_b32 v16, s52, 12 +; SI-NEXT: v_readfirstlane_b32 s40, v1 +; SI-NEXT: v_readfirstlane_b32 s41, v2 +; SI-NEXT: v_readfirstlane_b32 s14, v3 +; SI-NEXT: v_readfirstlane_b32 s15, v4 +; SI-NEXT: v_readfirstlane_b32 s12, v5 +; SI-NEXT: v_readfirstlane_b32 s13, v6 +; SI-NEXT: v_readfirstlane_b32 s10, v7 +; SI-NEXT: v_readfirstlane_b32 s11, v8 +; SI-NEXT: v_readfirstlane_b32 s8, v9 +; SI-NEXT: v_readfirstlane_b32 s9, v10 +; SI-NEXT: v_readfirstlane_b32 s6, v11 +; SI-NEXT: v_readfirstlane_b32 s7, v12 +; 
SI-NEXT: v_readfirstlane_b32 s4, v13 +; SI-NEXT: s_and_b64 s[42:43], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: v_writelane_b32 v16, s53, 13 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v11, s22 -; SI-NEXT: v_mov_b32_e32 v12, s20 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v14, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s29, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s23, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s21, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s19, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s17, v14, 16 -; SI-NEXT: s_lshr_b32 s44, s6, 16 -; SI-NEXT: s_lshr_b32 s45, s8, 16 -; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: s_lshr_b32 s47, s12, 16 -; SI-NEXT: s_lshr_b32 s56, s14, 16 -; SI-NEXT: s_lshr_b32 s57, s40, 16 -; SI-NEXT: s_lshr_b32 s58, s42, 16 -; SI-NEXT: s_lshr_b32 s59, s29, 16 -; SI-NEXT: s_lshr_b32 s60, s27, 16 -; SI-NEXT: s_lshr_b32 s61, s25, 16 -; SI-NEXT: s_lshr_b32 s62, s23, 16 -; SI-NEXT: s_lshr_b32 s63, s21, 16 -; SI-NEXT: s_lshr_b32 s72, s19, 16 -; SI-NEXT: s_lshr_b32 s73, s17, 16 +; SI-NEXT: s_lshr_b32 s30, s5, 16 +; SI-NEXT: s_lshr_b32 s31, s7, 16 +; SI-NEXT: s_lshr_b32 s34, s9, 16 +; SI-NEXT: s_lshr_b32 s35, s11, 16 +; SI-NEXT: s_lshr_b32 s36, s13, 16 +; 
SI-NEXT: s_lshr_b32 s37, s15, 16 +; SI-NEXT: s_lshr_b32 s38, s41, 16 +; SI-NEXT: s_lshr_b32 s39, s29, 16 +; SI-NEXT: s_lshr_b32 s48, s27, 16 +; SI-NEXT: s_lshr_b32 s49, s25, 16 +; SI-NEXT: s_lshr_b32 s50, s23, 16 +; SI-NEXT: s_lshr_b32 s51, s21, 16 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s43, s43, 3 ; SI-NEXT: s_add_i32 s41, s41, 3 -; SI-NEXT: s_add_i32 s15, s15, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s42, 
s42, 3 ; SI-NEXT: s_add_i32 s40, s40, 3 +; SI-NEXT: s_add_i32 s15, s15, 3 ; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: s_add_i32 s13, s13, 3 ; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v11, s22 -; SI-NEXT: v_mov_b32_e32 v12, s20 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v14, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s29, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s23, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s21, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s19, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s17, v14, 16 -; SI-NEXT: s_lshr_b32 s44, s6, 16 -; SI-NEXT: s_lshr_b32 s45, s8, 16 -; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: s_lshr_b32 s47, s12, 16 -; SI-NEXT: s_lshr_b32 s56, s14, 16 -; SI-NEXT: s_lshr_b32 s57, s40, 16 -; SI-NEXT: s_lshr_b32 s58, s42, 16 -; SI-NEXT: s_lshr_b32 s59, s29, 16 -; SI-NEXT: s_lshr_b32 s60, s27, 16 -; SI-NEXT: s_lshr_b32 s61, s25, 16 -; SI-NEXT: s_lshr_b32 s62, s23, 16 -; SI-NEXT: s_lshr_b32 s63, s21, 16 -; SI-NEXT: s_lshr_b32 s72, s19, 16 -; SI-NEXT: s_lshr_b32 s73, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; 
SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[42:43], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[26:27], 16 +; SI-NEXT: s_lshr_b32 s30, s5, 16 +; SI-NEXT: s_lshr_b32 s31, s7, 16 +; SI-NEXT: s_lshr_b32 s34, s9, 16 +; SI-NEXT: s_lshr_b32 s35, s11, 16 +; SI-NEXT: s_lshr_b32 s36, s13, 16 +; SI-NEXT: s_lshr_b32 s37, s15, 16 +; SI-NEXT: s_lshr_b32 s38, s41, 16 +; SI-NEXT: s_lshr_b32 s39, s29, 16 +; SI-NEXT: s_lshr_b32 s48, s27, 16 +; SI-NEXT: s_lshr_b32 s49, s25, 16 +; SI-NEXT: s_lshr_b32 s50, s23, 16 +; SI-NEXT: s_lshr_b32 s51, s21, 16 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: buffer_store_dword v14, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 
-; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: s_lshl_b32 s43, s92, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s43 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s90, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_lshl_b32 s16, s88, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s51, 16 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: 
s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, 
s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: 
v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; 
SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: 
s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, 
vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s30, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s53, v16, 13 +; SI-NEXT: v_readlane_b32 s52, v16, 12 +; SI-NEXT: v_readlane_b32 s51, v16, 11 +; SI-NEXT: v_readlane_b32 s50, v16, 10 +; SI-NEXT: v_readlane_b32 s49, v16, 9 +; SI-NEXT: v_readlane_b32 s48, v16, 8 +; SI-NEXT: v_readlane_b32 s39, v16, 7 +; SI-NEXT: v_readlane_b32 s38, v16, 6 +; SI-NEXT: v_readlane_b32 s37, v16, 5 +; SI-NEXT: v_readlane_b32 s36, v16, 4 +; SI-NEXT: v_readlane_b32 s35, v16, 3 +; SI-NEXT: v_readlane_b32 s34, v16, 2 +; SI-NEXT: v_readlane_b32 s31, v16, 1 +; SI-NEXT: v_readlane_b32 s30, v16, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: 
$sgpr78 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr48 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr39 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr34 ; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr31 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v28i32_to_v56i16_scalar: @@ -14769,223 +14800,227 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15 -; SI-NEXT: v_mov_b32_e32 v30, s16 -; SI-NEXT: v_mov_b32_e32 v29, s17 -; SI-NEXT: v_mov_b32_e32 v25, s18 -; SI-NEXT: v_mov_b32_e32 v23, s19 -; SI-NEXT: v_mov_b32_e32 v28, s20 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v27, s16 +; SI-NEXT: v_mov_b32_e32 v28, s17 +; SI-NEXT: v_mov_b32_e32 
v23, s18 +; SI-NEXT: v_mov_b32_e32 v24, s19 +; SI-NEXT: v_mov_b32_e32 v25, s20 ; SI-NEXT: v_mov_b32_e32 v26, s21 -; SI-NEXT: v_mov_b32_e32 v24, s22 +; SI-NEXT: v_mov_b32_e32 v21, s22 ; SI-NEXT: v_mov_b32_e32 v22, s23 -; SI-NEXT: v_mov_b32_e32 v20, s24 -; SI-NEXT: v_mov_b32_e32 v19, s25 -; SI-NEXT: v_mov_b32_e32 v18, s26 -; SI-NEXT: v_mov_b32_e32 v17, s27 -; SI-NEXT: v_mov_b32_e32 v16, s28 -; SI-NEXT: v_mov_b32_e32 v15, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v19, s24 +; SI-NEXT: v_mov_b32_e32 v20, s25 +; SI-NEXT: v_mov_b32_e32 v17, s26 +; SI-NEXT: v_mov_b32_e32 v18, s27 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v15, s28 +; SI-NEXT: v_mov_b32_e32 v16, s29 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 
offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v21, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v31, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v32, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v34, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v37, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v39, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v49, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v51, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v54, v22, v24, 16 -; SI-NEXT: v_alignbit_b32 v40, v26, v28, 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v42, v23, v25, 16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v16 +; SI-NEXT: s_waitcnt expcnt(5) +; 
SI-NEXT: v_lshrrev_b32_e32 v46, 16, v18 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v44, v29, v30, 16 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v20 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[27:28], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 -; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 -; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 -; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 -; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_lshr_b64 v[30:31], v[11:12], 16 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 +; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 +; SI-NEXT: 
v_lshr_b64 v[31:32], v[9:10], 16 ; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 +; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 +; SI-NEXT: v_lshr_b64 v[32:33], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 +; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 +; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[33:34], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 ; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 ; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_lshr_b64 v[34:35], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 +; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 ; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 ; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 -; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 -; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_alignbit_b32 v21, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v27, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v31, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v32, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v34, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v37, v2, v1, 16 -; SI-NEXT: 
v_alignbit_b32 v39, v15, v16, 16 -; SI-NEXT: v_alignbit_b32 v49, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v51, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v54, v22, v24, 16 -; SI-NEXT: v_alignbit_b32 v40, v26, v28, 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[27:28], 16 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v42, v23, v25, 16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v16 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v18 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v44, v29, v30, 16 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v15 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v19 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v20 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v23 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v29 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 ; SI-NEXT: .LBB29_3: ; %end -; SI-NEXT: v_and_b32_e32 
v30, 0xffff, v30 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v44 -; SI-NEXT: v_or_b32_e32 v30, v30, v44 -; SI-NEXT: buffer_store_dword v30, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v56 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v52 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 +; SI-NEXT: v_or_b32_e32 v27, v27, v37 +; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v59 +; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v42 -; SI-NEXT: v_or_b32_e32 v25, v25, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v25, v29, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v51 ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v23, v23, v27 +; SI-NEXT: v_add_i32_e32 v27, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v23, v27, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v47 -; SI-NEXT: v_or_b32_e32 v23, v23, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v58 +; SI-NEXT: v_or_b32_e32 v23, v23, v24 +; SI-NEXT: v_add_i32_e32 v24, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v40 -; SI-NEXT: v_or_b32_e32 v23, v23, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 16, v0 -; 
SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v50 +; SI-NEXT: v_and_b32_e32 v24, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v23, v24, v23 +; SI-NEXT: v_add_i32_e32 v24, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v46 -; SI-NEXT: v_or_b32_e32 v23, v23, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v57 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 24, v0 +; SI-NEXT: v_add_i32_e32 v24, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22 +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v45 -; SI-NEXT: v_or_b32_e32 v22, v22, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v22, v23, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v49 +; SI-NEXT: v_or_b32_e32 v21, v21, v23 +; SI-NEXT: v_add_i32_e32 v23, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v51 -; SI-NEXT: v_or_b32_e32 v20, v20, v22 -; SI-NEXT: v_add_i32_e32 v22, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v20, v22, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v56 +; SI-NEXT: v_or_b32_e32 v21, v21, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 +; 
SI-NEXT: v_or_b32_e32 v19, v19, v21 +; SI-NEXT: v_add_i32_e32 v21, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v47 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v49 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v41 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v46 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v39 -; SI-NEXT: v_or_b32_e32 v16, v16, v17 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v35 +; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v55 +; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v45 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v38 ; SI-NEXT: v_or_b32_e32 v1, v1, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -14997,7 +15032,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -15009,7 +15044,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -15021,7 +15056,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -15033,74 +15068,77 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, 
v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v21 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v29 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, 
s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr44 +; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr51 +; 
SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr49 +; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr38 -; SI-NEXT: ; implicit-def: $vgpr27 -; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr21 -; SI-NEXT: ; implicit-def: $vgpr35 +; SI-NEXT: ; implicit-def: $vgpr30 +; SI-NEXT: ; implicit-def: $vgpr29 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v28f32_to_v56i16_scalar: @@ -25036,337 +25074,368 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v14i64_to_v56i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v16, s30, 0 +; SI-NEXT: v_writelane_b32 v16, s31, 1 +; SI-NEXT: v_writelane_b32 v16, s34, 2 +; SI-NEXT: v_writelane_b32 v16, s35, 3 +; SI-NEXT: v_writelane_b32 v16, s36, 4 +; SI-NEXT: v_writelane_b32 v16, s37, 5 +; SI-NEXT: v_writelane_b32 v16, s38, 6 +; SI-NEXT: v_writelane_b32 v16, s39, 7 +; SI-NEXT: v_writelane_b32 v16, s48, 8 +; SI-NEXT: v_writelane_b32 v16, s49, 9 +; SI-NEXT: v_writelane_b32 v16, s50, 10 +; SI-NEXT: v_writelane_b32 v16, s51, 11 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15 -; SI-NEXT: v_readfirstlane_b32 s43, v1 -; SI-NEXT: v_readfirstlane_b32 
s42, v2 -; SI-NEXT: v_readfirstlane_b32 s41, v3 -; SI-NEXT: v_readfirstlane_b32 s40, v4 -; SI-NEXT: v_readfirstlane_b32 s15, v5 -; SI-NEXT: v_readfirstlane_b32 s14, v6 -; SI-NEXT: v_readfirstlane_b32 s13, v7 -; SI-NEXT: v_readfirstlane_b32 s12, v8 -; SI-NEXT: v_readfirstlane_b32 s11, v9 -; SI-NEXT: v_readfirstlane_b32 s10, v10 -; SI-NEXT: v_readfirstlane_b32 s9, v11 -; SI-NEXT: v_readfirstlane_b32 s8, v12 -; SI-NEXT: v_readfirstlane_b32 s7, v13 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v14 +; SI-NEXT: v_writelane_b32 v16, s52, 12 +; SI-NEXT: v_readfirstlane_b32 s40, v1 +; SI-NEXT: v_readfirstlane_b32 s41, v2 +; SI-NEXT: v_readfirstlane_b32 s14, v3 +; SI-NEXT: v_readfirstlane_b32 s15, v4 +; SI-NEXT: v_readfirstlane_b32 s12, v5 +; SI-NEXT: v_readfirstlane_b32 s13, v6 +; SI-NEXT: v_readfirstlane_b32 s10, v7 +; SI-NEXT: v_readfirstlane_b32 s11, v8 +; SI-NEXT: v_readfirstlane_b32 s8, v9 +; SI-NEXT: v_readfirstlane_b32 s9, v10 +; SI-NEXT: v_readfirstlane_b32 s6, v11 +; SI-NEXT: v_readfirstlane_b32 s7, v12 +; SI-NEXT: v_readfirstlane_b32 s4, v13 +; SI-NEXT: s_and_b64 s[42:43], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v14 +; SI-NEXT: v_writelane_b32 v16, s53, 13 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v11, s22 -; SI-NEXT: v_mov_b32_e32 v12, s20 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v14, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 
v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s29, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s23, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s21, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s19, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s17, v14, 16 -; SI-NEXT: s_lshr_b32 s44, s6, 16 -; SI-NEXT: s_lshr_b32 s45, s8, 16 -; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: s_lshr_b32 s47, s12, 16 -; SI-NEXT: s_lshr_b32 s56, s14, 16 -; SI-NEXT: s_lshr_b32 s57, s40, 16 -; SI-NEXT: s_lshr_b32 s58, s42, 16 -; SI-NEXT: s_lshr_b32 s59, s29, 16 -; SI-NEXT: s_lshr_b32 s60, s27, 16 -; SI-NEXT: s_lshr_b32 s61, s25, 16 -; SI-NEXT: s_lshr_b32 s62, s23, 16 -; SI-NEXT: s_lshr_b32 s63, s21, 16 -; SI-NEXT: s_lshr_b32 s72, s19, 16 -; SI-NEXT: s_lshr_b32 s73, s17, 16 +; SI-NEXT: s_lshr_b32 s30, s5, 16 +; SI-NEXT: s_lshr_b32 s31, s7, 16 +; SI-NEXT: s_lshr_b32 s34, s9, 16 +; SI-NEXT: s_lshr_b32 s35, s11, 16 +; SI-NEXT: s_lshr_b32 s36, s13, 16 +; SI-NEXT: s_lshr_b32 s37, s15, 16 +; SI-NEXT: s_lshr_b32 s38, s41, 16 +; SI-NEXT: s_lshr_b32 s39, s29, 16 +; SI-NEXT: s_lshr_b32 s48, s27, 16 +; SI-NEXT: s_lshr_b32 s49, s25, 16 +; SI-NEXT: s_lshr_b32 s50, s23, 16 +; SI-NEXT: s_lshr_b32 s51, s21, 16 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: s_lshr_b32 s53, s17, 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], 
s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 -; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 +; SI-NEXT: s_add_u32 s14, s14, 3 +; SI-NEXT: s_addc_u32 s15, s15, 0 +; SI-NEXT: s_add_u32 s40, s40, 3 +; SI-NEXT: s_addc_u32 s41, s41, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s43, s43, 3 -; SI-NEXT: s_addc_u32 s42, s42, 0 -; SI-NEXT: s_add_u32 s41, s41, 3 -; SI-NEXT: s_addc_u32 s40, s40, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s28 -; SI-NEXT: v_mov_b32_e32 v9, s26 -; SI-NEXT: v_mov_b32_e32 v10, s24 -; SI-NEXT: v_mov_b32_e32 v11, s22 -; SI-NEXT: v_mov_b32_e32 v12, s20 -; SI-NEXT: v_mov_b32_e32 v13, s18 -; SI-NEXT: v_mov_b32_e32 v14, s16 -; 
SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s29, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s27, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s25, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s23, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s21, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s19, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s17, v14, 16 -; SI-NEXT: s_lshr_b32 s44, s6, 16 -; SI-NEXT: s_lshr_b32 s45, s8, 16 -; SI-NEXT: s_lshr_b32 s46, s10, 16 -; SI-NEXT: s_lshr_b32 s47, s12, 16 -; SI-NEXT: s_lshr_b32 s56, s14, 16 -; SI-NEXT: s_lshr_b32 s57, s40, 16 -; SI-NEXT: s_lshr_b32 s58, s42, 16 -; SI-NEXT: s_lshr_b32 s59, s29, 16 -; SI-NEXT: s_lshr_b32 s60, s27, 16 -; SI-NEXT: s_lshr_b32 s61, s25, 16 -; SI-NEXT: s_lshr_b32 s62, s23, 16 -; SI-NEXT: s_lshr_b32 s63, s21, 16 -; SI-NEXT: s_lshr_b32 s72, s19, 16 -; SI-NEXT: s_lshr_b32 s73, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s30, s5, 16 +; SI-NEXT: s_lshr_b32 s31, s7, 16 +; SI-NEXT: s_lshr_b32 s34, s9, 16 +; SI-NEXT: s_lshr_b32 s35, s11, 16 +; SI-NEXT: s_lshr_b32 s36, s13, 16 +; SI-NEXT: s_lshr_b32 s37, s15, 16 +; SI-NEXT: s_lshr_b32 s38, s41, 16 +; SI-NEXT: s_lshr_b32 s39, s29, 16 +; SI-NEXT: s_lshr_b32 s48, s27, 16 +; SI-NEXT: s_lshr_b32 s49, s25, 16 +; SI-NEXT: s_lshr_b32 s50, s23, 16 +; SI-NEXT: s_lshr_b32 s51, s21, 16 +; SI-NEXT: s_lshr_b32 s52, s19, 16 +; SI-NEXT: 
s_lshr_b32 s53, s17, 16 +; SI-NEXT: s_lshr_b64 s[42:43], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; SI-NEXT: buffer_store_dword v14, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: s_lshl_b32 s43, s92, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s43 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s90, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 
v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_lshl_b32 s16, s88, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s51, 16 +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s22, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, 
vcc, 20, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; 
SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x48, v0 
-; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; 
SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s45, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s44, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s31, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s42, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s30, 16 +; SI-NEXT: 
v_add_i32_e32 v1, vcc, 0x68, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s53, v16, 13 +; SI-NEXT: v_readlane_b32 s52, v16, 12 +; SI-NEXT: v_readlane_b32 s51, v16, 11 +; SI-NEXT: v_readlane_b32 s50, v16, 10 +; SI-NEXT: v_readlane_b32 s49, v16, 9 +; SI-NEXT: v_readlane_b32 s48, v16, 8 +; SI-NEXT: v_readlane_b32 s39, v16, 7 +; SI-NEXT: v_readlane_b32 s38, v16, 6 +; SI-NEXT: v_readlane_b32 s37, v16, 5 +; SI-NEXT: v_readlane_b32 s36, v16, 4 +; SI-NEXT: v_readlane_b32 s35, v16, 3 +; SI-NEXT: v_readlane_b32 s34, v16, 2 +; SI-NEXT: v_readlane_b32 s31, v16, 1 +; SI-NEXT: v_readlane_b32 s30, v16, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr51 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr50 +; SI-NEXT: ; implicit-def: $sgpr76 +; SI-NEXT: ; implicit-def: $sgpr49 +; SI-NEXT: ; implicit-def: $sgpr74 +; SI-NEXT: ; implicit-def: $sgpr48 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr39 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr9 +; 
SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr35 ; SI-NEXT: ; implicit-def: $sgpr46 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr45 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr34 ; SI-NEXT: ; implicit-def: $sgpr44 +; SI-NEXT: ; implicit-def: $sgpr31 +; SI-NEXT: ; implicit-def: $sgpr42 +; SI-NEXT: ; implicit-def: $sgpr30 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v14i64_to_v56i16_scalar: @@ -34418,194 +34487,198 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mov_b32_e32 v15, s28 ; SI-NEXT: v_mov_b32_e32 v16, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 
4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v29, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v30, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v31, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v32, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v34, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v36, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v38, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v48, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v51, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v53, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v40, v26, v25, 16 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[30:31], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[15:16], 16 +; 
SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v42, v24, v23, 16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v16 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v18 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v44, v28, v27, 16 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v20 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[27:28], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 +; SI-NEXT: 
v_add_f64 v[11:12], v[11:12], 1.0 +; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 +; SI-NEXT: v_lshr_b64 v[29:30], v[13:14], 16 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 ; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_lshr_b64 v[30:31], v[11:12], 16 +; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; SI-NEXT: v_lshr_b64 v[31:32], v[9:10], 16 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 +; SI-NEXT: v_lshr_b64 v[32:33], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 ; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 +; SI-NEXT: v_lshr_b64 v[33:34], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 ; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 -; SI-NEXT: v_alignbit_b32 v29, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v30, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v31, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v32, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v33, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v34, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v36, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v38, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v48, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v51, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v53, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v40, v26, v25, 16 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 +; SI-NEXT: v_lshr_b64 v[34:35], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[17:18], 16 +; 
SI-NEXT: v_lshr_b64 v[52:53], v[27:28], 16 +; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v42, v24, v23, 16 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v16 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v18 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v44, v28, v27, 16 -; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v20 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v20 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v22 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v28 ; SI-NEXT: .LBB49_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v52 ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v44 -; SI-NEXT: v_or_b32_e32 v27, v27, v44 +; SI-NEXT: v_or_b32_e32 v27, v27, v37 ; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v56 +; SI-NEXT: 
v_lshlrev_b32_e32 v28, 16, v59 ; SI-NEXT: v_or_b32_e32 v27, v27, v28 ; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v51 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v27 ; SI-NEXT: v_add_i32_e32 v27, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v23, v27, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v47 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v58 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v23, 0xffff, v25 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v40 -; SI-NEXT: v_or_b32_e32 v23, v23, v24 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v50 +; SI-NEXT: v_and_b32_e32 v24, 0xffff, v25 +; SI-NEXT: v_or_b32_e32 v23, v24, v23 ; SI-NEXT: v_add_i32_e32 v24, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v46 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v57 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v49 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v45 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v56 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 
28, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v47 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v36 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v46 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v35 ; SI-NEXT: v_or_b32_e32 v15, v15, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v15, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v15, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v45 ; SI-NEXT: v_or_b32_e32 v15, v15, v16 ; SI-NEXT: v_add_i32_e32 v16, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v36 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v38 ; 
SI-NEXT: v_or_b32_e32 v1, v1, v15 ; SI-NEXT: v_add_i32_e32 v15, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v15, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34617,7 +34690,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34629,7 +34702,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34641,7 +34714,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34653,7 +34726,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: 
v_lshlrev_b32_e32 v2, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34665,7 +34738,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v55 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -34677,50 +34750,53 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:4 ; 4-byte 
Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr44 +; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr59 +; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr58 +; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr57 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr56 -; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr46 -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr42 ; SI-NEXT: ; implicit-def: $vgpr41 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr55 -; SI-NEXT: ; implicit-def: $vgpr36 ; SI-NEXT: ; implicit-def: $vgpr54 +; 
SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr39 ; SI-NEXT: ; implicit-def: $vgpr30 -; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr29 -; SI-NEXT: ; implicit-def: $vgpr35 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v14f64_to_v56i16_scalar: @@ -47126,503 +47202,582 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:20 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:8 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:20 +; 
SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:28 -; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:32 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:36 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:40 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v43, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v30 -; SI-NEXT: 
v_cvt_f16_f32_e32 v28, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v25, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v24, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v27, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v21, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v23, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v20, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v19, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v22, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v26 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v56, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v3, s16 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v61, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v58, s23 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v4 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v5 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v12 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, 
v13 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v17 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v21 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v25 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v8, v29 +; SI-NEXT: v_cvt_f16_f32_e32 v47, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v45, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v20, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v43, s25 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s26 +; SI-NEXT: v_cvt_f16_f32_e32 v41, s29 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v46, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v44, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v34, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v37 +; SI-NEXT: v_cvt_f16_f32_e32 v36, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v35, v28 +; SI-NEXT: s_waitcnt vmcnt(13) +; SI-NEXT: v_cvt_f16_f32_e32 v33, v38 ; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_cvt_f16_f32_e32 v10, v53 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v49 ; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_cvt_f16_f32_e32 v53, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v62, v51 ; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v53 ; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_cvt_f16_f32_e32 v5, v47 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f16_f32_e32 v8, v56 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_cvt_f16_f32_e32 v30, v57 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v7, v58 -; SI-NEXT: s_waitcnt vmcnt(5) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v59 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v57, v60 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: 
v_cvt_f16_f32_e32 v29, v61 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v4, v62 -; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v63 -; SI-NEXT: v_cvt_f16_f32_e32 v56, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v47, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v46, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v45, s29 -; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v55 +; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v40 +; SI-NEXT: v_cvt_f16_f32_e32 v24, s18 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB59_3 ; SI-NEXT: .LBB59_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v33, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v47, v47 -; SI-NEXT: v_cvt_f32_f16_e32 v45, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v56, 0x38000000, v47 -; SI-NEXT: v_add_f32_e32 v45, 0x38000000, v45 -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v42 -; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v33 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v56 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v56, v45 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v55 -; SI-NEXT: v_mov_b32_e32 v61, v57 -; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v44, v43 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v47 +; SI-NEXT: v_mov_b32_e32 v28, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v45 +; SI-NEXT: v_mov_b32_e32 v51, v23 +; 
SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v43 +; SI-NEXT: v_mov_b32_e32 v49, v19 +; SI-NEXT: v_mov_b32_e32 v53, v36 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_mov_b32_e32 v36, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v41 +; SI-NEXT: v_mov_b32_e32 v38, v15 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v28 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v39 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v54 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_or_b32_e32 v5, v5, v23 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v52 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v50 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v48 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v37 +; SI-NEXT: v_mov_b32_e32 v37, v11 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 
+; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v35 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v62 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v33 +; SI-NEXT: v_mov_b32_e32 v33, v31 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v53 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v48 -; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v44, v42 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_or_b32_e32 v48, v33, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v40 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 ; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v33 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v41 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v53 ; SI-NEXT: 
v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v33 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v54 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v30 ; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v61 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29 ; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v54 -; SI-NEXT: v_or_b32_e32 v54, v33, v29 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 ; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_or_b32_e32 v4, v4, v33 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v1 -; SI-NEXT: 
v_cvt_f16_f32_e32 v16, v16 ; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15 -; SI-NEXT: v_or_b32_e32 v7, v7, v33 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v6, v6, v33 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v11 -; SI-NEXT: v_or_b32_e32 v13, v13, v33 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v9 -; SI-NEXT: v_or_b32_e32 v16, v16, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v14 -; SI-NEXT: v_or_b32_e32 v15, v15, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_cvt_f32_f16_e32 v36, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v32 -; SI-NEXT: v_or_b32_e32 v34, v33, v34 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v31 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_or_b32_e32 v37, v37, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v51, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v52, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 +; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 +; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 +; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 +; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 +; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: 
v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 ; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_lshlrev_b32_e32 v49, 16, v35 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 ; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_or_b32_e32 v36, v36, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v33 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v51 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v52, 0x38000000, v52 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v52 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 +; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v5, v28, v19 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v31 +; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v49 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21 +; SI-NEXT: v_or_b32_e32 v28, v28, v15 +; SI-NEXT: 
buffer_store_dword v28, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 ; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_lshlrev_b32_e32 v51, 16, v49 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 ; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 +; SI-NEXT: v_or_b32_e32 v39, v5, v29 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_or_b32_e32 v5, v31, v25 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v38 ; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_or_b32_e32 v51, v33, v51 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 ; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; 
SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_or_b32_e32 v52, v52, v33 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v19 -; SI-NEXT: v_or_b32_e32 v20, v20, v33 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v21 -; SI-NEXT: v_or_b32_e32 v27, v27, v33 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v56 -; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v44 -; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v41 -; SI-NEXT: v_or_b32_e32 v25, v25, v33 -; SI-NEXT: v_or_b32_e32 v28, v28, v47 -; SI-NEXT: v_or_b32_e32 v26, v26, v46 -; SI-NEXT: v_or_b32_e32 v23, v23, v45 -; SI-NEXT: v_or_b32_e32 v22, v22, v57 -; SI-NEXT: v_or_b32_e32 v50, v50, v43 -; SI-NEXT: v_or_b32_e32 v38, v38, v58 -; SI-NEXT: v_or_b32_e32 v3, v3, v40 -; SI-NEXT: v_or_b32_e32 v18, v18, v55 -; SI-NEXT: v_or_b32_e32 v17, v17, v59 -; SI-NEXT: v_or_b32_e32 v12, v12, v53 -; SI-NEXT: v_or_b32_e32 v10, v10, v30 -; SI-NEXT: v_or_b32_e32 v8, v8, v60 -; SI-NEXT: v_alignbit_b32 v56, v25, v47, 16 -; SI-NEXT: v_alignbit_b32 v47, v27, v46, 16 -; SI-NEXT: v_alignbit_b32 v46, v20, v45, 16 -; SI-NEXT: v_alignbit_b32 v45, v52, v57, 16 -; SI-NEXT: v_mov_b32_e32 v57, v54 -; SI-NEXT: v_alignbit_b32 v44, v51, v43, 16 -; SI-NEXT: v_alignbit_b32 v43, v36, v42, 16 -; SI-NEXT: v_alignbit_b32 v42, v37, v58, 16 -; SI-NEXT: v_alignbit_b32 v41, v34, v40, 16 -; SI-NEXT: v_alignbit_b32 v40, v15, v55, 16 -; SI-NEXT: v_alignbit_b32 v55, v16, v59, 16 -; SI-NEXT: v_alignbit_b32 v54, v13, v53, 16 -; SI-NEXT: v_alignbit_b32 v53, v6, v30, 16 -; SI-NEXT: v_alignbit_b32 v30, v7, v60, 16 -; SI-NEXT: v_alignbit_b32 v29, v4, v29, 16 -; SI-NEXT: .LBB59_3: ; %end -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v56 -; SI-NEXT: v_and_b32_e32 v25, 0xffff, 
v25 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_or_b32_e32 v28, v28, v33 -; SI-NEXT: v_or_b32_e32 v24, v25, v24 -; SI-NEXT: v_add_i32_e32 v25, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v28, v0, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v47 -; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v27 -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; SI-NEXT: v_or_b32_e32 v21, v24, v21 -; SI-NEXT: v_add_i32_e32 v24, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v21, v24, s[0:3], 0 offen +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_or_b32_e32 v28, v28, v21 +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v21, 0xffff, v23 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v46 -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_or_b32_e32 v21, v21, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v19, v20, v19 -; SI-NEXT: v_add_i32_e32 v20, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_cvt_f32_f16_e32 v28, v37 +; SI-NEXT: v_or_b32_e32 v38, v31, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v56 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 +; SI-NEXT: v_or_b32_e32 v37, v28, v11 +; SI-NEXT: v_or_b32_e32 v62, v31, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v31, v33 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, 
v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_or_b32_e32 v5, v5, v17 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v45 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_cvt_f32_f16_e32 v5, v63 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_or_b32_e32 v5, v5, v9 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v52 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v39 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_cvt_f32_f16_e32 v5, v46 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v5 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v44 +; SI-NEXT: v_or_b32_e32 v35, v28, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v34 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_or_b32_e32 v34, v1, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v31 +; SI-NEXT: v_or_b32_e32 v56, v28, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v42 +; SI-NEXT: v_cvt_f16_f32_e32 v32, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v31 +; SI-NEXT: v_or_b32_e32 v2, v2, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v33, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v32 +; SI-NEXT: v_or_b32_e32 v4, v4, v27 +; SI-NEXT: 
v_cvt_f32_f16_e32 v27, v60 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v33 +; SI-NEXT: v_or_b32_e32 v6, v6, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v57 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v36 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v60 +; SI-NEXT: v_or_b32_e32 v8, v8, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v57 +; SI-NEXT: v_or_b32_e32 v10, v10, v27 +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v63 +; SI-NEXT: v_or_b32_e32 v12, v12, v28 +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v27 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v36 +; SI-NEXT: v_or_b32_e32 v14, v14, v27 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v28 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v50 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v44 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload +; SI-NEXT: v_or_b32_e32 v18, v18, v27 +; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_lshr_b64 v[52:53], v[17:18], 16 +; SI-NEXT: 
s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v27 +; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v51 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v49 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28 +; SI-NEXT: v_or_b32_e32 v22, v22, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v59 +; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_lshr_b64 v[54:55], v[21:22], 16 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v28 +; SI-NEXT: v_cvt_f32_f16_e32 v28, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v30, v51 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v36 +; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 +; SI-NEXT: v_or_b32_e32 v26, v26, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v59 +; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v43 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_cvt_f16_f32_e32 v36, v30 +; SI-NEXT: v_or_b32_e32 v30, v28, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v58 +; SI-NEXT: v_lshr_b64 v[41:42], v[29:30], 16 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v36 +; SI-NEXT: v_or_b32_e32 v16, v16, v28 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v58, v27 +; SI-NEXT: v_cvt_f32_f16_e32 v27, v61 +; SI-NEXT: v_lshr_b64 v[43:44], v[15:16], 16 +; 
SI-NEXT: v_mov_b32_e32 v44, v34 +; SI-NEXT: v_mov_b32_e32 v42, v33 +; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v27 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v58 +; SI-NEXT: v_or_b32_e32 v20, v20, v27 +; SI-NEXT: v_lshr_b64 v[45:46], v[19:20], 16 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v61 +; SI-NEXT: v_or_b32_e32 v24, v24, v27 +; SI-NEXT: v_lshr_b64 v[33:34], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[47:48], v[23:24], 16 +; SI-NEXT: v_mov_b32_e32 v23, v36 +; SI-NEXT: v_mov_b32_e32 v46, v35 +; SI-NEXT: v_lshr_b64 v[35:36], v[7:8], 16 +; SI-NEXT: v_mov_b32_e32 v7, v63 +; SI-NEXT: v_mov_b32_e32 v34, v56 +; SI-NEXT: v_mov_b32_e32 v56, v62 +; SI-NEXT: v_lshr_b64 v[62:63], v[3:4], 16 +; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: v_mov_b32_e32 v19, v39 +; SI-NEXT: v_mov_b32_e32 v15, v38 +; SI-NEXT: v_lshr_b64 v[39:40], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[11:12], 16 +; SI-NEXT: v_mov_b32_e32 v11, v37 +; SI-NEXT: v_lshr_b64 v[37:38], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[27:28], v[1:2], 16 +; SI-NEXT: .LBB59_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v47 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v36 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v35 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, 
s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v38 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v42 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v45 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v19, 0xffff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v31 -; SI-NEXT: v_or_b32_e32 v19, v19, v20 -; SI-NEXT: v_add_i32_e32 v20, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v58 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v43 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v36 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v41 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v19 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v30 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: 
v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v39 +; SI-NEXT: s_waitcnt vmcnt(0) ; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v41 -; SI-NEXT: v_or_b32_e32 v3, v3, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v3, v19, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v32 -; SI-NEXT: v_or_b32_e32 v3, v3, v19 -; SI-NEXT: v_add_i32_e32 v19, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v3, v19, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v54 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v40 -; SI-NEXT: v_or_b32_e32 v3, v3, v18 -; SI-NEXT: v_add_i32_e32 v18, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v3, v18, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: 
v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v3, v3, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v3, v14, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v17 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v55 -; SI-NEXT: v_or_b32_e32 v3, v3, v14 -; SI-NEXT: v_add_i32_e32 v14, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v3, v14, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; SI-NEXT: v_or_b32_e32 v3, v3, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v54 -; SI-NEXT: v_or_b32_e32 v3, v3, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v11 -; SI-NEXT: v_or_b32_e32 v3, v3, v9 -; SI-NEXT: 
v_add_i32_e32 v9, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v53 -; SI-NEXT: v_or_b32_e32 v3, v3, v9 -; SI-NEXT: v_add_i32_e32 v9, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v50 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v30 -; SI-NEXT: v_or_b32_e32 v3, v3, v5 -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v48 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v7 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v7 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) 
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v63 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v57 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v56 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v35 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v46 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v33 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v42 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v44 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v62 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v32 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v57 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v29 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v34 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v27 ; SI-NEXT: v_or_b32_e32 v1, v1, v3 ; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll index 7785a9938e1be..967f1a9b442b0 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll @@ -3847,361 +3847,396 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3 ; SI-LABEL: bitcast_v30i32_to_v60i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v18, s30, 0 +; SI-NEXT: v_writelane_b32 v18, s31, 1 +; SI-NEXT: v_writelane_b32 v18, s34, 2 +; SI-NEXT: v_writelane_b32 v18, s35, 3 +; SI-NEXT: v_writelane_b32 v18, s36, 4 +; SI-NEXT: v_writelane_b32 v18, s37, 5 +; SI-NEXT: v_writelane_b32 v18, s38, 6 +; SI-NEXT: v_writelane_b32 v18, s39, 7 +; SI-NEXT: v_writelane_b32 v18, s48, 8 +; SI-NEXT: v_writelane_b32 v18, s49, 9 +; SI-NEXT: v_writelane_b32 v18, s50, 10 +; SI-NEXT: v_writelane_b32 v18, s51, 11 +; SI-NEXT: v_writelane_b32 v18, s52, 12 +; SI-NEXT: v_writelane_b32 v18, s53, 13 +; SI-NEXT: v_writelane_b32 v18, s54, 14 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17 -; SI-NEXT: v_readfirstlane_b32 s45, v1 -; 
SI-NEXT: v_readfirstlane_b32 s44, v2 -; SI-NEXT: v_readfirstlane_b32 s43, v3 -; SI-NEXT: v_readfirstlane_b32 s42, v4 -; SI-NEXT: v_readfirstlane_b32 s41, v5 -; SI-NEXT: v_readfirstlane_b32 s40, v6 -; SI-NEXT: v_readfirstlane_b32 s15, v7 -; SI-NEXT: v_readfirstlane_b32 s14, v8 -; SI-NEXT: v_readfirstlane_b32 s13, v9 -; SI-NEXT: v_readfirstlane_b32 s12, v10 -; SI-NEXT: v_readfirstlane_b32 s11, v11 -; SI-NEXT: v_readfirstlane_b32 s10, v12 -; SI-NEXT: v_readfirstlane_b32 s9, v13 -; SI-NEXT: v_readfirstlane_b32 s8, v14 -; SI-NEXT: v_readfirstlane_b32 s7, v15 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_readfirstlane_b32 s6, v16 +; SI-NEXT: v_writelane_b32 v18, s55, 15 +; SI-NEXT: v_readfirstlane_b32 s42, v1 +; SI-NEXT: v_readfirstlane_b32 s43, v2 +; SI-NEXT: v_readfirstlane_b32 s40, v3 +; SI-NEXT: v_readfirstlane_b32 s41, v4 +; SI-NEXT: v_readfirstlane_b32 s14, v5 +; SI-NEXT: v_readfirstlane_b32 s15, v6 +; SI-NEXT: v_readfirstlane_b32 s12, v7 +; SI-NEXT: v_readfirstlane_b32 s13, v8 +; SI-NEXT: v_readfirstlane_b32 s10, v9 +; SI-NEXT: v_readfirstlane_b32 s11, v10 +; SI-NEXT: v_readfirstlane_b32 s8, v11 +; SI-NEXT: v_readfirstlane_b32 s9, v12 +; SI-NEXT: v_readfirstlane_b32 s6, v13 +; SI-NEXT: v_readfirstlane_b32 s7, v14 +; SI-NEXT: v_readfirstlane_b32 s4, v15 +; SI-NEXT: s_and_b64 s[44:45], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v16 +; SI-NEXT: v_writelane_b32 v18, s64, 16 ; SI-NEXT: s_cbranch_scc0 .LBB13_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s28 -; SI-NEXT: v_mov_b32_e32 v10, s26 -; SI-NEXT: v_mov_b32_e32 v11, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v13, s20 -; SI-NEXT: v_mov_b32_e32 v14, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: 
v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s29, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s25, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s23, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s21, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s19, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 16 -; SI-NEXT: s_lshr_b32 s46, s6, 16 -; SI-NEXT: s_lshr_b32 s47, s8, 16 -; SI-NEXT: s_lshr_b32 s56, s10, 16 -; SI-NEXT: s_lshr_b32 s57, s12, 16 -; SI-NEXT: s_lshr_b32 s58, s14, 16 -; SI-NEXT: s_lshr_b32 s59, s40, 16 -; SI-NEXT: s_lshr_b32 s60, s42, 16 -; SI-NEXT: s_lshr_b32 s61, s44, 16 -; SI-NEXT: s_lshr_b32 s62, s29, 16 -; SI-NEXT: s_lshr_b32 s63, s27, 16 -; SI-NEXT: s_lshr_b32 s72, s25, 16 -; SI-NEXT: s_lshr_b32 s73, s23, 16 -; SI-NEXT: s_lshr_b32 s74, s21, 16 -; SI-NEXT: s_lshr_b32 s75, s19, 16 -; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_lshr_b32 s34, s5, 16 +; SI-NEXT: s_lshr_b32 s35, s7, 16 +; SI-NEXT: s_lshr_b32 s36, s9, 16 +; SI-NEXT: s_lshr_b32 s37, s11, 16 +; SI-NEXT: s_lshr_b32 s38, s13, 16 +; SI-NEXT: s_lshr_b32 s39, s15, 16 +; SI-NEXT: s_lshr_b32 s48, s41, 16 +; SI-NEXT: s_lshr_b32 s49, s43, 16 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s27, 16 +; SI-NEXT: s_lshr_b32 s52, s25, 16 +; SI-NEXT: s_lshr_b32 s53, s23, 16 +; SI-NEXT: s_lshr_b32 s54, s21, 16 +; SI-NEXT: s_lshr_b32 s55, s19, 16 +; SI-NEXT: s_lshr_b32 s64, s17, 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[14:15], 16 +; SI-NEXT: 
s_lshr_b64 s[72:73], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB13_3 ; SI-NEXT: .LBB13_2: ; %cmp.true +; SI-NEXT: s_add_i32 s17, s17, 3 ; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_add_i32 s19, s19, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s21, s21, 3 ; SI-NEXT: s_add_i32 s20, s20, 3 +; SI-NEXT: s_add_i32 s23, s23, 3 ; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_add_i32 s25, s25, 3 ; SI-NEXT: s_add_i32 s24, s24, 3 +; SI-NEXT: s_add_i32 s27, s27, 3 ; SI-NEXT: s_add_i32 s26, s26, 3 +; SI-NEXT: s_add_i32 s29, s29, 3 ; SI-NEXT: s_add_i32 s28, s28, 3 -; SI-NEXT: s_add_i32 s45, s45, 3 ; SI-NEXT: s_add_i32 s43, s43, 3 -; SI-NEXT: s_add_i32 s41, s41, 3 -; SI-NEXT: s_add_i32 s15, s15, 3 -; SI-NEXT: s_add_i32 s13, s13, 3 -; SI-NEXT: s_add_i32 s11, s11, 3 -; SI-NEXT: s_add_i32 s9, s9, 3 -; SI-NEXT: s_add_i32 s7, s7, 3 -; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: s_add_i32 s19, s19, 3 -; SI-NEXT: s_add_i32 s21, s21, 3 -; SI-NEXT: s_add_i32 s23, s23, 3 -; SI-NEXT: s_add_i32 s25, s25, 3 -; SI-NEXT: s_add_i32 s27, s27, 3 -; SI-NEXT: s_add_i32 s29, s29, 3 -; SI-NEXT: s_add_i32 s44, s44, 3 ; SI-NEXT: s_add_i32 s42, s42, 3 +; SI-NEXT: s_add_i32 s41, s41, 3 ; SI-NEXT: s_add_i32 s40, s40, 3 +; SI-NEXT: s_add_i32 s15, s15, 3 ; SI-NEXT: s_add_i32 s14, s14, 3 +; SI-NEXT: s_add_i32 s13, s13, 3 ; SI-NEXT: s_add_i32 s12, s12, 3 +; SI-NEXT: s_add_i32 s11, s11, 3 ; SI-NEXT: s_add_i32 s10, s10, 3 +; SI-NEXT: s_add_i32 s9, s9, 3 ; SI-NEXT: s_add_i32 s8, s8, 3 +; SI-NEXT: s_add_i32 s7, s7, 3 ; SI-NEXT: s_add_i32 s6, s6, 3 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 
v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s28 -; SI-NEXT: v_mov_b32_e32 v10, s26 -; SI-NEXT: v_mov_b32_e32 v11, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v13, s20 -; SI-NEXT: v_mov_b32_e32 v14, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s29, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s25, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s23, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s21, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s19, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 16 -; SI-NEXT: s_lshr_b32 s46, s6, 16 -; SI-NEXT: s_lshr_b32 s47, s8, 16 -; SI-NEXT: s_lshr_b32 s56, s10, 16 -; SI-NEXT: s_lshr_b32 s57, s12, 16 -; SI-NEXT: s_lshr_b32 s58, s14, 16 -; SI-NEXT: s_lshr_b32 s59, s40, 16 -; SI-NEXT: s_lshr_b32 s60, s42, 16 -; SI-NEXT: s_lshr_b32 s61, s44, 16 -; SI-NEXT: s_lshr_b32 s62, s29, 16 -; SI-NEXT: s_lshr_b32 s63, s27, 16 -; SI-NEXT: s_lshr_b32 s72, s25, 16 -; SI-NEXT: s_lshr_b32 s73, s23, 16 -; SI-NEXT: s_lshr_b32 s74, s21, 16 -; SI-NEXT: s_lshr_b32 s75, s19, 16 -; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_add_i32 s5, s5, 3 +; SI-NEXT: s_add_i32 s4, s4, 3 +; SI-NEXT: s_lshr_b64 s[44:45], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[40:41], 16 +; 
SI-NEXT: s_lshr_b64 s[74:75], s[42:43], 16 +; SI-NEXT: s_lshr_b32 s34, s5, 16 +; SI-NEXT: s_lshr_b32 s35, s7, 16 +; SI-NEXT: s_lshr_b32 s36, s9, 16 +; SI-NEXT: s_lshr_b32 s37, s11, 16 +; SI-NEXT: s_lshr_b32 s38, s13, 16 +; SI-NEXT: s_lshr_b32 s39, s15, 16 +; SI-NEXT: s_lshr_b32 s48, s41, 16 +; SI-NEXT: s_lshr_b32 s49, s43, 16 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s27, 16 +; SI-NEXT: s_lshr_b32 s52, s25, 16 +; SI-NEXT: s_lshr_b32 s53, s23, 16 +; SI-NEXT: s_lshr_b32 s54, s21, 16 +; SI-NEXT: s_lshr_b32 s55, s19, 16 +; SI-NEXT: s_lshr_b32 s64, s17, 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 ; SI-NEXT: .LBB13_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v16, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: buffer_store_dword v15, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s75, 16 -; SI-NEXT: buffer_store_dword v16, v15, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v15, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: s_lshl_b32 s45, s30, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s45 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; 
SI-NEXT: s_lshl_b32 s17, s64, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_lshl_b32 s16, s94, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s55, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s92, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s54, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: v_mov_b32_e32 v6, s16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_lshl_b32 s16, s90, 16 +; SI-NEXT: s_and_b32 s17, s22, 0xffff +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 
+; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; 
SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff +; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s45, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s51, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s44, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 64, 
v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s42, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xffff +; SI-NEXT: s_lshl_b32 s17, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 
-; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s14, s14, 
s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 s9, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x74, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s64, v18, 16 +; SI-NEXT: v_readlane_b32 s55, v18, 15 +; SI-NEXT: v_readlane_b32 s54, v18, 14 +; SI-NEXT: v_readlane_b32 s53, v18, 13 +; SI-NEXT: v_readlane_b32 s52, v18, 12 +; SI-NEXT: v_readlane_b32 s51, v18, 11 +; SI-NEXT: v_readlane_b32 s50, v18, 10 +; SI-NEXT: v_readlane_b32 s49, v18, 9 +; SI-NEXT: v_readlane_b32 s48, v18, 8 +; SI-NEXT: v_readlane_b32 s39, v18, 7 +; SI-NEXT: v_readlane_b32 s38, v18, 6 +; SI-NEXT: v_readlane_b32 s37, v18, 5 +; SI-NEXT: 
v_readlane_b32 s36, v18, 4 +; SI-NEXT: v_readlane_b32 s35, v18, 3 +; SI-NEXT: v_readlane_b32 s34, v18, 2 +; SI-NEXT: v_readlane_b32 s31, v18, 1 +; SI-NEXT: v_readlane_b32 s30, v18, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB13_4: -; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr75 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr50 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr49 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr48 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr39 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr56 
-; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr47 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr35 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: s_branch .LBB13_2 ; ; VI-LABEL: bitcast_v30i32_to_v60i16_scalar: @@ -15902,245 +15937,248 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a, ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17 -; SI-NEXT: v_mov_b32_e32 v30, s16 +; SI-NEXT: v_mov_b32_e32 v27, s16 ; SI-NEXT: v_mov_b32_e32 v28, s17 -; SI-NEXT: v_mov_b32_e32 v33, s18 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: v_mov_b32_e32 v32, s19 -; SI-NEXT: v_mov_b32_e32 v29, s20 -; SI-NEXT: v_mov_b32_e32 v27, s21 -; SI-NEXT: v_mov_b32_e32 v25, s22 +; SI-NEXT: v_mov_b32_e32 v29, s18 +; SI-NEXT: v_mov_b32_e32 v30, s19 +; SI-NEXT: v_mov_b32_e32 v25, s20 +; SI-NEXT: v_mov_b32_e32 v26, s21 +; SI-NEXT: v_mov_b32_e32 v23, s22 ; SI-NEXT: v_mov_b32_e32 v24, s23 -; SI-NEXT: v_mov_b32_e32 v23, s24 -; SI-NEXT: v_mov_b32_e32 v21, s25 -; SI-NEXT: v_mov_b32_e32 v20, s26 -; SI-NEXT: v_mov_b32_e32 v19, s27 -; SI-NEXT: v_mov_b32_e32 v18, s28 -; SI-NEXT: v_mov_b32_e32 v17, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; 
SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v21, s24 +; SI-NEXT: v_mov_b32_e32 v22, s25 +; SI-NEXT: v_mov_b32_e32 v19, s26 +; SI-NEXT: v_mov_b32_e32 v20, s27 +; SI-NEXT: s_and_b64 s[4:5], vcc, exec +; SI-NEXT: v_mov_b32_e32 v17, s28 +; SI-NEXT: v_mov_b32_e32 v18, s29 +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB29_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; 
SI-NEXT: v_alignbit_b32 v22, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v26, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v31, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v34, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v35, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v36, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v38, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v48, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v51, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v53, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v55, v21, v23, 16 -; SI-NEXT: v_alignbit_b32 v41, v24, v25, 16 -; SI-NEXT: v_alignbit_b32 v44, v27, v29, 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[53:54], v[29:30], 16 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v46, v32, v33, 16 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v18 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v20 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v56, v28, v30, 16 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v8 -; SI-NEXT: 
v_lshrrev_b32_e32 v54, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v22 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v28 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[54:55], v[27:28], 16 ; SI-NEXT: s_cbranch_execnz .LBB29_3 ; SI-NEXT: .LBB29_2: ; %cmp.true -; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 -; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 -; SI-NEXT: v_add_f32_e32 v32, 1.0, v32 -; SI-NEXT: v_add_f32_e32 v33, 1.0, v33 -; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 -; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 -; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 -; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 +; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 +; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 +; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 +; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 +; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 +; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 +; SI-NEXT: v_lshr_b64 v[31:32], v[15:16], 16 +; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 +; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 +; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 +; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 +; SI-NEXT: v_lshr_b64 v[32:33], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[17:18], 16 +; SI-NEXT: v_add_f32_e32 v22, 1.0, v22 ; SI-NEXT: v_add_f32_e32 v21, 1.0, v21 +; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 +; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 +; SI-NEXT: v_lshr_b64 v[33:34], v[11:12], 16 +; SI-NEXT: v_lshr_b64 
v[49:50], v[19:20], 16 +; SI-NEXT: v_add_f32_e32 v24, 1.0, v24 ; SI-NEXT: v_add_f32_e32 v23, 1.0, v23 -; SI-NEXT: v_add_f32_e32 v19, 1.0, v19 -; SI-NEXT: v_add_f32_e32 v20, 1.0, v20 -; SI-NEXT: v_add_f32_e32 v17, 1.0, v17 -; SI-NEXT: v_add_f32_e32 v18, 1.0, v18 -; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 -; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 -; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 -; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 -; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 -; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 ; SI-NEXT: v_add_f32_e32 v8, 1.0, v8 ; SI-NEXT: v_add_f32_e32 v7, 1.0, v7 -; SI-NEXT: v_add_f32_e32 v10, 1.0, v10 -; SI-NEXT: v_add_f32_e32 v9, 1.0, v9 -; SI-NEXT: v_add_f32_e32 v12, 1.0, v12 -; SI-NEXT: v_add_f32_e32 v11, 1.0, v11 -; SI-NEXT: v_add_f32_e32 v14, 1.0, v14 -; SI-NEXT: v_add_f32_e32 v13, 1.0, v13 -; SI-NEXT: v_add_f32_e32 v16, 1.0, v16 -; SI-NEXT: v_add_f32_e32 v15, 1.0, v15 -; SI-NEXT: v_alignbit_b32 v22, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v26, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v31, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v34, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v35, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v36, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v38, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v48, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v51, v17, v18, 16 -; SI-NEXT: v_alignbit_b32 v53, v19, v20, 16 -; SI-NEXT: v_alignbit_b32 v55, v21, v23, 16 -; SI-NEXT: v_alignbit_b32 v41, v24, v25, 16 -; SI-NEXT: v_alignbit_b32 v44, v27, v29, 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[50:51], v[21:22], 16 +; SI-NEXT: v_add_f32_e32 v26, 1.0, v26 +; SI-NEXT: v_add_f32_e32 v25, 1.0, v25 +; SI-NEXT: v_add_f32_e32 v6, 1.0, v6 +; SI-NEXT: v_add_f32_e32 v5, 1.0, v5 +; SI-NEXT: v_lshr_b64 v[35:36], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[23:24], 16 +; SI-NEXT: v_add_f32_e32 v30, 1.0, v30 +; SI-NEXT: v_add_f32_e32 v29, 1.0, v29 +; SI-NEXT: v_add_f32_e32 v4, 1.0, v4 +; SI-NEXT: v_add_f32_e32 v3, 1.0, v3 +; SI-NEXT: v_lshr_b64 v[36:37], v[5:6], 16 +; 
SI-NEXT: v_lshr_b64 v[52:53], v[25:26], 16 +; SI-NEXT: v_add_f32_e32 v28, 1.0, v28 +; SI-NEXT: v_add_f32_e32 v27, 1.0, v27 +; SI-NEXT: v_add_f32_e32 v2, 1.0, v2 +; SI-NEXT: v_add_f32_e32 v1, 1.0, v1 +; SI-NEXT: v_lshr_b64 v[37:38], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[53:54], v[29:30], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[54:55], v[27:28], 16 +; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v46, v32, v33, 16 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v18 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v20 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v56, v28, v30, 16 -; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v17 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v19 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v21 +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v22 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v24 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v27 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v32 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v28 ; SI-NEXT: .LBB29_3: ; %end -; 
SI-NEXT: v_and_b32_e32 v30, 0xffff, v30 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v56 -; SI-NEXT: v_or_b32_e32 v30, v30, v56 -; SI-NEXT: buffer_store_dword v30, v0, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28 -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v60 -; SI-NEXT: v_or_b32_e32 v28, v28, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v28, v30, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v46 -; SI-NEXT: v_or_b32_e32 v28, v28, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 8, v0 -; SI-NEXT: buffer_store_dword v28, v30, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v54 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 +; SI-NEXT: v_or_b32_e32 v27, v27, v39 +; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v28, 0xffff, v32 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v59 -; SI-NEXT: v_or_b32_e32 v28, v28, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v28, v30, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v62 +; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v53 ; SI-NEXT: v_and_b32_e32 v28, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v44 -; SI-NEXT: v_or_b32_e32 v28, v28, v29 -; SI-NEXT: v_add_i32_e32 v29, vcc, 16, v0 -; SI-NEXT: buffer_store_dword v28, v29, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 +; SI-NEXT: v_or_b32_e32 v27, v28, v27 +; SI-NEXT: v_add_i32_e32 v28, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v58 +; SI-NEXT: v_and_b32_e32 v27, 0xffff, v30 +; SI-NEXT: 
v_lshlrev_b32_e32 v28, 16, v61 ; SI-NEXT: v_or_b32_e32 v27, v27, v28 -; SI-NEXT: v_add_i32_e32 v28, vcc, 20, v0 +; SI-NEXT: v_add_i32_e32 v28, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v52 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: v_or_b32_e32 v25, v25, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 24, v0 +; SI-NEXT: v_add_i32_e32 v27, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v25, v27, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v57 -; SI-NEXT: v_or_b32_e32 v24, v24, v25 -; SI-NEXT: v_add_i32_e32 v25, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v24, v25, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v60 +; SI-NEXT: v_or_b32_e32 v25, v25, v26 +; SI-NEXT: v_add_i32_e32 v26, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v51 ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 +; SI-NEXT: v_or_b32_e32 v23, v23, v25 +; SI-NEXT: v_add_i32_e32 v25, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v55 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v59 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 -; SI-NEXT: v_add_i32_e32 v24, vcc, 32, v0 +; SI-NEXT: v_add_i32_e32 v24, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v47 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v50 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 -; SI-NEXT: v_add_i32_e32 v23, vcc, 36, v0 +; SI-NEXT: v_add_i32_e32 v23, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v21, v23, 
s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v20, 0xffff, v20 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v53 -; SI-NEXT: v_or_b32_e32 v20, v20, v21 -; SI-NEXT: v_add_i32_e32 v21, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v20, v21, s[0:3], 0 offen +; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v58 +; SI-NEXT: v_or_b32_e32 v21, v21, v22 +; SI-NEXT: v_add_i32_e32 v22, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v45 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v49 +; SI-NEXT: v_or_b32_e32 v19, v19, v21 +; SI-NEXT: v_add_i32_e32 v21, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v57 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v18, 0xffff, v18 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v51 -; SI-NEXT: v_or_b32_e32 v18, v18, v19 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48 +; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: v_add_i32_e32 v19, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 +; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v43 +; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v56 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v38 ; SI-NEXT: 
v_or_b32_e32 v1, v1, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v47 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v46 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -16152,7 +16190,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v45 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -16164,7 +16202,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -16176,92 +16214,94 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a, ; SI-NEXT: 
buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v33 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v13 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v32 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v22 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v31 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v37 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 
0x74, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload +; 
SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB29_4: -; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr44 +; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: ; implicit-def: $vgpr48 +; SI-NEXT: ; implicit-def: $vgpr56 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr51 +; SI-NEXT: ; implicit-def: $vgpr44 ; SI-NEXT: ; implicit-def: $vgpr43 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr42 -; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr41 ; SI-NEXT: ; implicit-def: $vgpr40 +; SI-NEXT: ; implicit-def: $vgpr38 +; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; implicit-def: $vgpr50 +; SI-NEXT: ; implicit-def: $vgpr33 +; SI-NEXT: ; implicit-def: $vgpr32 ; SI-NEXT: ; 
implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr49 -; SI-NEXT: ; implicit-def: $vgpr26 -; SI-NEXT: ; implicit-def: $vgpr39 -; SI-NEXT: ; implicit-def: $vgpr22 -; SI-NEXT: ; implicit-def: $vgpr37 ; SI-NEXT: s_branch .LBB29_2 ; ; VI-LABEL: bitcast_v30f32_to_v60i16_scalar: @@ -27039,361 +27079,396 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3 ; SI-LABEL: bitcast_v15i64_to_v60i16_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: s_mov_b64 exec, s[4:5] +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_writelane_b32 v18, s30, 0 +; SI-NEXT: v_writelane_b32 v18, s31, 1 +; SI-NEXT: v_writelane_b32 v18, s34, 2 +; SI-NEXT: v_writelane_b32 v18, s35, 3 +; SI-NEXT: v_writelane_b32 v18, s36, 4 +; SI-NEXT: v_writelane_b32 v18, s37, 5 +; SI-NEXT: v_writelane_b32 v18, s38, 6 +; SI-NEXT: v_writelane_b32 v18, s39, 7 +; SI-NEXT: v_writelane_b32 v18, s48, 8 +; SI-NEXT: v_writelane_b32 v18, s49, 9 +; SI-NEXT: v_writelane_b32 v18, s50, 10 +; SI-NEXT: v_writelane_b32 v18, s51, 11 +; SI-NEXT: v_writelane_b32 v18, s52, 12 +; SI-NEXT: v_writelane_b32 v18, s53, 13 +; SI-NEXT: v_writelane_b32 v18, s54, 14 ; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17 -; SI-NEXT: v_readfirstlane_b32 s45, v1 -; SI-NEXT: v_readfirstlane_b32 s44, v2 -; SI-NEXT: v_readfirstlane_b32 s43, v3 -; SI-NEXT: v_readfirstlane_b32 s42, v4 -; SI-NEXT: v_readfirstlane_b32 s41, v5 -; SI-NEXT: v_readfirstlane_b32 s40, v6 -; SI-NEXT: v_readfirstlane_b32 s15, v7 -; SI-NEXT: v_readfirstlane_b32 s14, v8 -; SI-NEXT: v_readfirstlane_b32 s13, v9 -; SI-NEXT: v_readfirstlane_b32 s12, v10 -; SI-NEXT: v_readfirstlane_b32 s11, v11 -; SI-NEXT: v_readfirstlane_b32 s10, v12 -; SI-NEXT: v_readfirstlane_b32 s9, v13 -; SI-NEXT: v_readfirstlane_b32 s8, v14 -; SI-NEXT: v_readfirstlane_b32 s7, v15 -; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: 
v_readfirstlane_b32 s6, v16 +; SI-NEXT: v_writelane_b32 v18, s55, 15 +; SI-NEXT: v_readfirstlane_b32 s42, v1 +; SI-NEXT: v_readfirstlane_b32 s43, v2 +; SI-NEXT: v_readfirstlane_b32 s40, v3 +; SI-NEXT: v_readfirstlane_b32 s41, v4 +; SI-NEXT: v_readfirstlane_b32 s14, v5 +; SI-NEXT: v_readfirstlane_b32 s15, v6 +; SI-NEXT: v_readfirstlane_b32 s12, v7 +; SI-NEXT: v_readfirstlane_b32 s13, v8 +; SI-NEXT: v_readfirstlane_b32 s10, v9 +; SI-NEXT: v_readfirstlane_b32 s11, v10 +; SI-NEXT: v_readfirstlane_b32 s8, v11 +; SI-NEXT: v_readfirstlane_b32 s9, v12 +; SI-NEXT: v_readfirstlane_b32 s6, v13 +; SI-NEXT: v_readfirstlane_b32 s7, v14 +; SI-NEXT: v_readfirstlane_b32 s4, v15 +; SI-NEXT: s_and_b64 s[44:45], vcc, exec +; SI-NEXT: v_readfirstlane_b32 s5, v16 +; SI-NEXT: v_writelane_b32 v18, s64, 16 ; SI-NEXT: s_cbranch_scc0 .LBB41_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s28 -; SI-NEXT: v_mov_b32_e32 v10, s26 -; SI-NEXT: v_mov_b32_e32 v11, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v13, s20 -; SI-NEXT: v_mov_b32_e32 v14, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s29, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s25, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s23, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s21, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s19, v14, 16 -; SI-NEXT: 
v_alignbit_b32 v15, s17, v15, 16 -; SI-NEXT: s_lshr_b32 s46, s6, 16 -; SI-NEXT: s_lshr_b32 s47, s8, 16 -; SI-NEXT: s_lshr_b32 s56, s10, 16 -; SI-NEXT: s_lshr_b32 s57, s12, 16 -; SI-NEXT: s_lshr_b32 s58, s14, 16 -; SI-NEXT: s_lshr_b32 s59, s40, 16 -; SI-NEXT: s_lshr_b32 s60, s42, 16 -; SI-NEXT: s_lshr_b32 s61, s44, 16 -; SI-NEXT: s_lshr_b32 s62, s29, 16 -; SI-NEXT: s_lshr_b32 s63, s27, 16 -; SI-NEXT: s_lshr_b32 s72, s25, 16 -; SI-NEXT: s_lshr_b32 s73, s23, 16 -; SI-NEXT: s_lshr_b32 s74, s21, 16 -; SI-NEXT: s_lshr_b32 s75, s19, 16 -; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_lshr_b32 s34, s5, 16 +; SI-NEXT: s_lshr_b32 s35, s7, 16 +; SI-NEXT: s_lshr_b32 s36, s9, 16 +; SI-NEXT: s_lshr_b32 s37, s11, 16 +; SI-NEXT: s_lshr_b32 s38, s13, 16 +; SI-NEXT: s_lshr_b32 s39, s15, 16 +; SI-NEXT: s_lshr_b32 s48, s41, 16 +; SI-NEXT: s_lshr_b32 s49, s43, 16 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s27, 16 +; SI-NEXT: s_lshr_b32 s52, s25, 16 +; SI-NEXT: s_lshr_b32 s53, s23, 16 +; SI-NEXT: s_lshr_b32 s54, s21, 16 +; SI-NEXT: s_lshr_b32 s55, s19, 16 +; SI-NEXT: s_lshr_b32 s64, s17, 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB41_3 ; SI-NEXT: .LBB41_2: ; %cmp.true -; SI-NEXT: s_add_u32 s16, s16, 3 -; SI-NEXT: s_addc_u32 s17, s17, 0 -; SI-NEXT: s_add_u32 s18, s18, 3 -; SI-NEXT: s_addc_u32 s19, s19, 0 
-; SI-NEXT: s_add_u32 s20, s20, 3 -; SI-NEXT: s_addc_u32 s21, s21, 0 -; SI-NEXT: s_add_u32 s22, s22, 3 -; SI-NEXT: s_addc_u32 s23, s23, 0 -; SI-NEXT: s_add_u32 s24, s24, 3 -; SI-NEXT: s_addc_u32 s25, s25, 0 -; SI-NEXT: s_add_u32 s26, s26, 3 -; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s4, s4, 3 +; SI-NEXT: s_addc_u32 s5, s5, 0 +; SI-NEXT: s_add_u32 s6, s6, 3 +; SI-NEXT: s_addc_u32 s7, s7, 0 +; SI-NEXT: s_add_u32 s8, s8, 3 +; SI-NEXT: s_addc_u32 s9, s9, 0 +; SI-NEXT: s_add_u32 s10, s10, 3 +; SI-NEXT: s_addc_u32 s11, s11, 0 +; SI-NEXT: s_add_u32 s12, s12, 3 +; SI-NEXT: s_addc_u32 s13, s13, 0 +; SI-NEXT: s_add_u32 s14, s14, 3 +; SI-NEXT: s_addc_u32 s15, s15, 0 +; SI-NEXT: s_add_u32 s40, s40, 3 +; SI-NEXT: s_addc_u32 s41, s41, 0 +; SI-NEXT: s_add_u32 s42, s42, 3 +; SI-NEXT: s_addc_u32 s43, s43, 0 ; SI-NEXT: s_add_u32 s28, s28, 3 ; SI-NEXT: s_addc_u32 s29, s29, 0 -; SI-NEXT: s_add_u32 s45, s45, 3 -; SI-NEXT: s_addc_u32 s44, s44, 0 -; SI-NEXT: s_add_u32 s43, s43, 3 -; SI-NEXT: s_addc_u32 s42, s42, 0 -; SI-NEXT: s_add_u32 s41, s41, 3 -; SI-NEXT: s_addc_u32 s40, s40, 0 -; SI-NEXT: s_add_u32 s15, s15, 3 -; SI-NEXT: s_addc_u32 s14, s14, 0 -; SI-NEXT: s_add_u32 s13, s13, 3 -; SI-NEXT: s_addc_u32 s12, s12, 0 -; SI-NEXT: s_add_u32 s11, s11, 3 -; SI-NEXT: s_addc_u32 s10, s10, 0 -; SI-NEXT: s_add_u32 s9, s9, 3 -; SI-NEXT: s_addc_u32 s8, s8, 0 -; SI-NEXT: s_add_u32 s7, s7, 3 -; SI-NEXT: s_addc_u32 s6, s6, 0 -; SI-NEXT: v_mov_b32_e32 v1, s7 -; SI-NEXT: v_mov_b32_e32 v2, s9 -; SI-NEXT: v_mov_b32_e32 v3, s11 -; SI-NEXT: v_mov_b32_e32 v4, s13 -; SI-NEXT: v_mov_b32_e32 v5, s15 -; SI-NEXT: v_mov_b32_e32 v6, s41 -; SI-NEXT: v_mov_b32_e32 v7, s43 -; SI-NEXT: v_mov_b32_e32 v8, s45 -; SI-NEXT: v_mov_b32_e32 v9, s28 -; SI-NEXT: v_mov_b32_e32 v10, s26 -; SI-NEXT: v_mov_b32_e32 v11, s24 -; SI-NEXT: v_mov_b32_e32 v12, s22 -; SI-NEXT: v_mov_b32_e32 v13, s20 -; SI-NEXT: v_mov_b32_e32 v14, s18 -; SI-NEXT: v_mov_b32_e32 v15, s16 -; SI-NEXT: v_alignbit_b32 v1, s6, v1, 16 -; SI-NEXT: 
v_alignbit_b32 v2, s8, v2, 16 -; SI-NEXT: v_alignbit_b32 v3, s10, v3, 16 -; SI-NEXT: v_alignbit_b32 v4, s12, v4, 16 -; SI-NEXT: v_alignbit_b32 v5, s14, v5, 16 -; SI-NEXT: v_alignbit_b32 v6, s40, v6, 16 -; SI-NEXT: v_alignbit_b32 v7, s42, v7, 16 -; SI-NEXT: v_alignbit_b32 v8, s44, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s29, v9, 16 -; SI-NEXT: v_alignbit_b32 v10, s27, v10, 16 -; SI-NEXT: v_alignbit_b32 v11, s25, v11, 16 -; SI-NEXT: v_alignbit_b32 v12, s23, v12, 16 -; SI-NEXT: v_alignbit_b32 v13, s21, v13, 16 -; SI-NEXT: v_alignbit_b32 v14, s19, v14, 16 -; SI-NEXT: v_alignbit_b32 v15, s17, v15, 16 -; SI-NEXT: s_lshr_b32 s46, s6, 16 -; SI-NEXT: s_lshr_b32 s47, s8, 16 -; SI-NEXT: s_lshr_b32 s56, s10, 16 -; SI-NEXT: s_lshr_b32 s57, s12, 16 -; SI-NEXT: s_lshr_b32 s58, s14, 16 -; SI-NEXT: s_lshr_b32 s59, s40, 16 -; SI-NEXT: s_lshr_b32 s60, s42, 16 -; SI-NEXT: s_lshr_b32 s61, s44, 16 -; SI-NEXT: s_lshr_b32 s62, s29, 16 -; SI-NEXT: s_lshr_b32 s63, s27, 16 -; SI-NEXT: s_lshr_b32 s72, s25, 16 -; SI-NEXT: s_lshr_b32 s73, s23, 16 -; SI-NEXT: s_lshr_b32 s74, s21, 16 -; SI-NEXT: s_lshr_b32 s75, s19, 16 -; SI-NEXT: s_lshr_b32 s76, s17, 16 +; SI-NEXT: s_add_u32 s26, s26, 3 +; SI-NEXT: s_addc_u32 s27, s27, 0 +; SI-NEXT: s_add_u32 s24, s24, 3 +; SI-NEXT: s_addc_u32 s25, s25, 0 +; SI-NEXT: s_add_u32 s22, s22, 3 +; SI-NEXT: s_addc_u32 s23, s23, 0 +; SI-NEXT: s_add_u32 s20, s20, 3 +; SI-NEXT: s_addc_u32 s21, s21, 0 +; SI-NEXT: s_add_u32 s18, s18, 3 +; SI-NEXT: s_addc_u32 s19, s19, 0 +; SI-NEXT: s_add_u32 s16, s16, 3 +; SI-NEXT: s_addc_u32 s17, s17, 0 +; SI-NEXT: s_lshr_b32 s34, s5, 16 +; SI-NEXT: s_lshr_b32 s35, s7, 16 +; SI-NEXT: s_lshr_b32 s36, s9, 16 +; SI-NEXT: s_lshr_b32 s37, s11, 16 +; SI-NEXT: s_lshr_b32 s38, s13, 16 +; SI-NEXT: s_lshr_b32 s39, s15, 16 +; SI-NEXT: s_lshr_b32 s48, s41, 16 +; SI-NEXT: s_lshr_b32 s49, s43, 16 +; SI-NEXT: s_lshr_b32 s50, s29, 16 +; SI-NEXT: s_lshr_b32 s51, s27, 16 +; SI-NEXT: s_lshr_b32 s52, s25, 16 +; SI-NEXT: s_lshr_b32 s53, s23, 16 +; SI-NEXT: 
s_lshr_b32 s54, s21, 16 +; SI-NEXT: s_lshr_b32 s55, s19, 16 +; SI-NEXT: s_lshr_b32 s64, s17, 16 +; SI-NEXT: s_lshr_b64 s[44:45], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[46:47], s[6:7], 16 +; SI-NEXT: s_lshr_b64 s[56:57], s[8:9], 16 +; SI-NEXT: s_lshr_b64 s[58:59], s[10:11], 16 +; SI-NEXT: s_lshr_b64 s[60:61], s[12:13], 16 +; SI-NEXT: s_lshr_b64 s[62:63], s[14:15], 16 +; SI-NEXT: s_lshr_b64 s[72:73], s[40:41], 16 +; SI-NEXT: s_lshr_b64 s[74:75], s[42:43], 16 +; SI-NEXT: s_lshr_b64 s[76:77], s[28:29], 16 +; SI-NEXT: s_lshr_b64 s[78:79], s[26:27], 16 +; SI-NEXT: s_lshr_b64 s[88:89], s[24:25], 16 +; SI-NEXT: s_lshr_b64 s[90:91], s[22:23], 16 +; SI-NEXT: s_lshr_b64 s[92:93], s[20:21], 16 +; SI-NEXT: s_lshr_b64 s[94:95], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[30:31], s[16:17], 16 ; SI-NEXT: .LBB41_3: ; %end -; SI-NEXT: s_and_b32 s4, s16, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; SI-NEXT: v_or_b32_e32 v15, s4, v15 -; SI-NEXT: s_and_b32 s4, s17, 0xffff -; SI-NEXT: s_lshl_b32 s5, s76, 16 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: v_mov_b32_e32 v16, s4 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: buffer_store_dword v15, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v15, vcc, 4, v0 -; SI-NEXT: v_or_b32_e32 v14, s4, v14 -; SI-NEXT: s_and_b32 s4, s19, 0xffff -; SI-NEXT: s_lshl_b32 s5, s75, 16 -; SI-NEXT: buffer_store_dword v16, v15, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v15, vcc, 8, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v15, s4 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; SI-NEXT: s_lshl_b32 s45, s30, 16 +; SI-NEXT: s_and_b32 s16, s16, 0xffff +; SI-NEXT: s_or_b32 s16, s16, s45 +; SI-NEXT: v_mov_b32_e32 v1, s16 +; SI-NEXT: s_and_b32 s16, s17, 0xffff +; SI-NEXT: s_lshl_b32 s17, s64, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: 
s_lshl_b32 s16, s94, 16 +; SI-NEXT: s_and_b32 s17, s18, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v3, s16 +; SI-NEXT: s_and_b32 s16, s19, 0xffff +; SI-NEXT: s_lshl_b32 s17, s55, 16 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: v_mov_b32_e32 v4, s16 +; SI-NEXT: s_lshl_b32 s16, s92, 16 +; SI-NEXT: s_and_b32 s17, s20, 0xffff +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: v_mov_b32_e32 v5, s16 +; SI-NEXT: s_and_b32 s16, s21, 0xffff +; SI-NEXT: s_lshl_b32 s17, s54, 16 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v14, vcc, 12, v0 -; SI-NEXT: v_or_b32_e32 v13, s4, v13 -; SI-NEXT: s_and_b32 s4, s21, 0xffff -; SI-NEXT: s_lshl_b32 s5, s74, 16 -; SI-NEXT: buffer_store_dword v15, v14, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v14, vcc, 16, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v14, s4 -; SI-NEXT: s_and_b32 s4, s22, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; SI-NEXT: v_add_i32_e32 v1, vcc, 4, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 8, v0 +; SI-NEXT: v_mov_b32_e32 v6, s16 +; SI-NEXT: buffer_store_dword v3, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 12, v0 +; SI-NEXT: s_lshl_b32 s16, s90, 16 +; SI-NEXT: s_and_b32 s17, s22, 0xffff +; SI-NEXT: buffer_store_dword v4, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 16, v0 +; SI-NEXT: s_or_b32 s16, s17, s16 +; SI-NEXT: buffer_store_dword v5, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 20, v0 +; SI-NEXT: s_waitcnt expcnt(3) +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s23, 0xffff +; SI-NEXT: s_lshl_b32 s17, s53, 16 +; SI-NEXT: buffer_store_dword v6, v1, s[0:3], 0 offen +; SI-NEXT: v_add_i32_e32 v1, vcc, 24, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_add_i32_e32 v13, vcc, 20, v0 -; SI-NEXT: v_or_b32_e32 v12, s4, v12 -; SI-NEXT: s_and_b32 s4, s23, 0xffff -; SI-NEXT: s_lshl_b32 s5, s73, 16 -; SI-NEXT: buffer_store_dword v14, v13, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v13, vcc, 24, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v13, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s24, 0xffff +; SI-NEXT: s_lshl_b32 s17, s88, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 28, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v12, vcc, 28, v0 -; SI-NEXT: v_or_b32_e32 v11, s4, v11 -; SI-NEXT: s_and_b32 s4, s25, 0xffff -; SI-NEXT: s_lshl_b32 s5, s72, 16 -; SI-NEXT: buffer_store_dword v13, v12, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v12, vcc, 32, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v12, s4 -; SI-NEXT: s_and_b32 s4, s26, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s25, 0xffff +; SI-NEXT: s_lshl_b32 s17, s52, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 32, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0 -; SI-NEXT: v_or_b32_e32 v10, s4, v10 -; SI-NEXT: s_and_b32 s4, s27, 0xffff -; SI-NEXT: s_lshl_b32 s5, s63, 16 -; SI-NEXT: buffer_store_dword v12, v11, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v11, vcc, 40, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v11, s4 -; SI-NEXT: s_and_b32 s4, s28, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s26, 0xffff 
+; SI-NEXT: s_lshl_b32 s17, s78, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 36, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v10, vcc, 44, v0 -; SI-NEXT: v_or_b32_e32 v9, s4, v9 -; SI-NEXT: s_and_b32 s4, s29, 0xffff -; SI-NEXT: s_lshl_b32 s5, s62, 16 -; SI-NEXT: buffer_store_dword v11, v10, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v10, vcc, 48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v10, s4 -; SI-NEXT: s_and_b32 s4, s45, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s27, 0xffff +; SI-NEXT: s_lshl_b32 s17, s51, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 40, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v9, vcc, 52, v0 -; SI-NEXT: v_or_b32_e32 v8, s4, v8 -; SI-NEXT: s_and_b32 s4, s44, 0xffff -; SI-NEXT: s_lshl_b32 s5, s61, 16 -; SI-NEXT: buffer_store_dword v10, v9, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v9, vcc, 56, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v9, s4 -; SI-NEXT: s_and_b32 s4, s43, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s28, 0xffff +; SI-NEXT: s_lshl_b32 s17, s76, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 44, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v8, vcc, 60, v0 -; SI-NEXT: v_or_b32_e32 v7, s4, v7 -; SI-NEXT: s_and_b32 s4, s42, 0xffff -; SI-NEXT: s_lshl_b32 s5, s60, 16 -; SI-NEXT: buffer_store_dword v9, v8, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v8, vcc, 64, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen -; SI-NEXT: 
v_mov_b32_e32 v8, s4 -; SI-NEXT: s_and_b32 s4, s41, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s29, 0xffff +; SI-NEXT: s_lshl_b32 s17, s50, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 48, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x44, v0 -; SI-NEXT: v_or_b32_e32 v6, s4, v6 -; SI-NEXT: s_and_b32 s4, s40, 0xffff -; SI-NEXT: s_lshl_b32 s5, s59, 16 -; SI-NEXT: buffer_store_dword v8, v7, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v7, vcc, 0x48, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v7, s4 -; SI-NEXT: s_and_b32 s4, s15, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s42, 0xffff +; SI-NEXT: s_lshl_b32 s17, s74, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 52, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x4c, v0 -; SI-NEXT: v_or_b32_e32 v5, s4, v5 -; SI-NEXT: s_and_b32 s4, s14, 0xffff -; SI-NEXT: s_lshl_b32 s5, s58, 16 -; SI-NEXT: buffer_store_dword v7, v6, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x50, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v6, s4 -; SI-NEXT: s_and_b32 s4, s13, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s43, 0xffff +; SI-NEXT: s_lshl_b32 s17, s49, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 56, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x54, v0 -; SI-NEXT: v_or_b32_e32 v4, s4, v4 -; SI-NEXT: s_and_b32 s4, s12, 0xffff -; SI-NEXT: s_lshl_b32 s5, s57, 16 -; SI-NEXT: 
buffer_store_dword v6, v5, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v5, vcc, 0x58, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v5, s4 -; SI-NEXT: s_and_b32 s4, s11, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s40, 0xffff +; SI-NEXT: s_lshl_b32 s17, s72, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 60, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x5c, v0 -; SI-NEXT: v_or_b32_e32 v3, s4, v3 -; SI-NEXT: s_and_b32 s4, s10, 0xffff -; SI-NEXT: s_lshl_b32 s5, s56, 16 -; SI-NEXT: buffer_store_dword v5, v4, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x60, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v4, s4 -; SI-NEXT: s_and_b32 s4, s9, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s16, s41, 0xffff +; SI-NEXT: s_lshl_b32 s17, s48, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 64, v0 +; SI-NEXT: s_or_b32 s16, s16, s17 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 -; SI-NEXT: v_or_b32_e32 v2, s4, v2 -; SI-NEXT: s_and_b32 s4, s8, 0xffff -; SI-NEXT: s_lshl_b32 s5, s47, 16 -; SI-NEXT: buffer_store_dword v4, v3, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 -; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen -; SI-NEXT: v_mov_b32_e32 v3, s4 -; SI-NEXT: s_and_b32 s4, s7, 0xffff -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_mov_b32_e32 v2, s16 +; SI-NEXT: s_and_b32 s14, s14, 0xffff +; SI-NEXT: s_lshl_b32 s16, s62, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x44, v0 +; SI-NEXT: s_or_b32 s14, s14, s16 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: s_waitcnt 
expcnt(0) -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 -; SI-NEXT: v_or_b32_e32 v1, s4, v1 -; SI-NEXT: s_and_b32 s4, s6, 0xffff -; SI-NEXT: s_lshl_b32 s5, s46, 16 -; SI-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen -; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0 +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s14, s15, 0xffff +; SI-NEXT: s_lshl_b32 s15, s39, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x48, v0 +; SI-NEXT: s_or_b32 s14, s14, s15 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s14 +; SI-NEXT: s_and_b32 s12, s12, 0xffff +; SI-NEXT: s_lshl_b32 s14, s60, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x4c, v0 +; SI-NEXT: s_or_b32 s12, s12, s14 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s12, s13, 0xffff +; SI-NEXT: s_lshl_b32 s13, s38, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x50, v0 +; SI-NEXT: s_or_b32 s12, s12, s13 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s12 +; SI-NEXT: s_and_b32 s10, s10, 0xffff +; SI-NEXT: s_lshl_b32 s12, s58, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x54, v0 +; SI-NEXT: s_or_b32 s10, s10, s12 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s10, s11, 0xffff +; SI-NEXT: s_lshl_b32 s11, s37, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x58, v0 +; SI-NEXT: s_or_b32 s10, s10, s11 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s10 +; SI-NEXT: s_and_b32 s8, s8, 0xffff +; SI-NEXT: s_lshl_b32 s10, s56, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x5c, v0 +; SI-NEXT: s_or_b32 s8, s8, s10 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s8, s9, 0xffff +; SI-NEXT: s_lshl_b32 
s9, s36, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x60, v0 +; SI-NEXT: s_or_b32 s8, s8, s9 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_lshl_b32 s8, s46, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x64, v0 +; SI-NEXT: s_or_b32 s6, s6, s8 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s6, s7, 0xffff +; SI-NEXT: s_lshl_b32 s7, s35, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x68, v0 +; SI-NEXT: s_or_b32 s6, s6, s7 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s6 +; SI-NEXT: s_and_b32 s4, s4, 0xffff +; SI-NEXT: s_lshl_b32 s6, s44, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x6c, v0 +; SI-NEXT: s_or_b32 s4, s4, s6 +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v2, s4 +; SI-NEXT: s_and_b32 s4, s5, 0xffff +; SI-NEXT: s_lshl_b32 s5, s34, 16 +; SI-NEXT: v_add_i32_e32 v1, vcc, 0x70, v0 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen +; SI-NEXT: buffer_store_dword v2, v1, s[0:3], 0 offen ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x74, v0 -; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_mov_b32_e32 v1, s4 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: v_readlane_b32 s64, v18, 16 +; SI-NEXT: v_readlane_b32 s55, v18, 15 +; SI-NEXT: v_readlane_b32 s54, v18, 14 +; SI-NEXT: v_readlane_b32 s53, v18, 13 +; SI-NEXT: v_readlane_b32 s52, v18, 12 +; SI-NEXT: v_readlane_b32 s51, v18, 11 +; SI-NEXT: v_readlane_b32 s50, v18, 10 +; SI-NEXT: v_readlane_b32 s49, v18, 9 +; SI-NEXT: v_readlane_b32 s48, v18, 8 +; SI-NEXT: v_readlane_b32 s39, v18, 7 +; SI-NEXT: v_readlane_b32 s38, v18, 6 +; SI-NEXT: v_readlane_b32 s37, v18, 5 +; SI-NEXT: v_readlane_b32 s36, v18, 4 +; SI-NEXT: v_readlane_b32 s35, v18, 3 +; SI-NEXT: 
v_readlane_b32 s34, v18, 2 +; SI-NEXT: v_readlane_b32 s31, v18, 1 +; SI-NEXT: v_readlane_b32 s30, v18, 0 +; SI-NEXT: s_xor_saveexec_b64 s[4:5], -1 +; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: s_mov_b64 exec, s[4:5] ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB41_4: -; SI-NEXT: ; implicit-def: $vgpr15 +; SI-NEXT: ; implicit-def: $sgpr30 +; SI-NEXT: ; implicit-def: $sgpr64 +; SI-NEXT: ; implicit-def: $sgpr94 +; SI-NEXT: ; implicit-def: $sgpr55 +; SI-NEXT: ; implicit-def: $sgpr92 +; SI-NEXT: ; implicit-def: $sgpr54 +; SI-NEXT: ; implicit-def: $sgpr90 +; SI-NEXT: ; implicit-def: $sgpr53 +; SI-NEXT: ; implicit-def: $sgpr88 +; SI-NEXT: ; implicit-def: $sgpr52 +; SI-NEXT: ; implicit-def: $sgpr78 +; SI-NEXT: ; implicit-def: $sgpr51 ; SI-NEXT: ; implicit-def: $sgpr76 -; SI-NEXT: ; implicit-def: $vgpr14 -; SI-NEXT: ; implicit-def: $sgpr75 -; SI-NEXT: ; implicit-def: $vgpr13 +; SI-NEXT: ; implicit-def: $sgpr50 ; SI-NEXT: ; implicit-def: $sgpr74 -; SI-NEXT: ; implicit-def: $vgpr12 -; SI-NEXT: ; implicit-def: $sgpr73 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr49 ; SI-NEXT: ; implicit-def: $sgpr72 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $sgpr63 -; SI-NEXT: ; implicit-def: $vgpr9 +; SI-NEXT: ; implicit-def: $sgpr48 ; SI-NEXT: ; implicit-def: $sgpr62 -; SI-NEXT: ; implicit-def: $vgpr8 -; SI-NEXT: ; implicit-def: $sgpr61 -; SI-NEXT: ; implicit-def: $vgpr7 +; SI-NEXT: ; implicit-def: $sgpr39 ; SI-NEXT: ; implicit-def: $sgpr60 -; SI-NEXT: ; implicit-def: $vgpr6 -; SI-NEXT: ; implicit-def: $sgpr59 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr38 ; SI-NEXT: ; implicit-def: $sgpr58 -; SI-NEXT: ; implicit-def: $vgpr4 -; SI-NEXT: ; implicit-def: $sgpr57 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr37 ; SI-NEXT: ; implicit-def: $sgpr56 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $sgpr47 -; 
SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $sgpr36 ; SI-NEXT: ; implicit-def: $sgpr46 +; SI-NEXT: ; implicit-def: $sgpr35 +; SI-NEXT: ; implicit-def: $sgpr34 +; SI-NEXT: ; implicit-def: $sgpr44 ; SI-NEXT: s_branch .LBB41_2 ; ; VI-LABEL: bitcast_v15i64_to_v60i16_scalar: @@ -37262,203 +37337,207 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: s_and_b64 s[4:5], vcc, exec ; SI-NEXT: v_mov_b32_e32 v17, s28 ; SI-NEXT: v_mov_b32_e32 v18, s29 -; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill -; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:44 
; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill ; SI-NEXT: s_cbranch_scc0 .LBB49_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_alignbit_b32 v31, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v32, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v34, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v35, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v36, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v37, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v39, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v50, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v52, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v55, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v41, v24, v23, 16 -; SI-NEXT: v_alignbit_b32 v43, v26, v25, 16 +; SI-NEXT: v_lshr_b64 v[31:32], v[15:16], 16 +; SI-NEXT: v_lshr_b64 v[32:33], v[13:14], 16 +; SI-NEXT: v_lshr_b64 v[33:34], v[11:12], 16 +; SI-NEXT: v_lshr_b64 v[34:35], v[9:10], 16 +; SI-NEXT: v_lshr_b64 v[35:36], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[36:37], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[1:2], 16 +; SI-NEXT: v_lshr_b64 v[37:38], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[23:24], 16 +; 
SI-NEXT: v_lshr_b64 v[38:39], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[53:54], v[25:26], 16 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v46, v30, v29, 16 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v18 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v20 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v56, v28, v27, 16 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v22 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v24 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v28 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v28 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[54:55], v[29:30], 16 +; SI-NEXT: v_lshr_b64 v[39:40], v[27:28], 16 ; SI-NEXT: s_cbranch_execnz .LBB49_3 ; SI-NEXT: .LBB49_2: ; %cmp.true -; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 -; SI-NEXT: 
v_add_f64 v[29:30], v[29:30], 1.0 -; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 -; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 -; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 -; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 -; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 -; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 -; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 -; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 -; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 -; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 -; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 ; SI-NEXT: v_add_f64 v[15:16], v[15:16], 1.0 ; SI-NEXT: v_add_f64 v[13:14], v[13:14], 1.0 -; SI-NEXT: v_alignbit_b32 v31, v16, v15, 16 -; SI-NEXT: v_alignbit_b32 v32, v14, v13, 16 -; SI-NEXT: v_alignbit_b32 v33, v12, v11, 16 -; SI-NEXT: v_alignbit_b32 v34, v10, v9, 16 -; SI-NEXT: v_alignbit_b32 v35, v8, v7, 16 -; SI-NEXT: v_alignbit_b32 v36, v6, v5, 16 -; SI-NEXT: v_alignbit_b32 v37, v4, v3, 16 -; SI-NEXT: v_alignbit_b32 v39, v2, v1, 16 -; SI-NEXT: v_alignbit_b32 v50, v18, v17, 16 -; SI-NEXT: v_alignbit_b32 v52, v20, v19, 16 -; SI-NEXT: v_alignbit_b32 v55, v22, v21, 16 -; SI-NEXT: v_alignbit_b32 v41, v24, v23, 16 -; SI-NEXT: v_alignbit_b32 v43, v26, v25, 16 +; SI-NEXT: v_add_f64 v[11:12], v[11:12], 1.0 +; SI-NEXT: v_lshr_b64 v[31:32], v[15:16], 16 +; SI-NEXT: v_add_f64 v[9:10], v[9:10], 1.0 +; SI-NEXT: v_lshr_b64 v[32:33], v[13:14], 16 +; SI-NEXT: v_add_f64 v[7:8], v[7:8], 1.0 +; SI-NEXT: v_lshr_b64 v[33:34], v[11:12], 16 +; SI-NEXT: v_add_f64 v[5:6], v[5:6], 1.0 +; SI-NEXT: v_add_f64 v[1:2], v[1:2], 1.0 +; SI-NEXT: v_lshr_b64 v[34:35], v[9:10], 16 +; SI-NEXT: v_add_f64 v[3:4], v[3:4], 1.0 +; SI-NEXT: v_add_f64 v[23:24], v[23:24], 1.0 +; SI-NEXT: v_lshr_b64 v[35:36], v[7:8], 16 +; SI-NEXT: v_add_f64 v[17:18], v[17:18], 1.0 +; SI-NEXT: v_add_f64 v[19:20], v[19:20], 1.0 +; SI-NEXT: v_add_f64 v[25:26], v[25:26], 1.0 +; SI-NEXT: v_lshr_b64 v[36:37], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[51:52], v[1:2], 16 +; SI-NEXT: v_add_f64 v[21:22], v[21:22], 1.0 +; 
SI-NEXT: v_add_f64 v[29:30], v[29:30], 1.0 +; SI-NEXT: v_add_f64 v[27:28], v[27:28], 1.0 +; SI-NEXT: v_lshr_b64 v[37:38], v[3:4], 16 +; SI-NEXT: v_lshr_b64 v[52:53], v[23:24], 16 +; SI-NEXT: v_lshr_b64 v[38:39], v[17:18], 16 +; SI-NEXT: v_lshr_b64 v[48:49], v[19:20], 16 +; SI-NEXT: v_lshr_b64 v[53:54], v[25:26], 16 +; SI-NEXT: v_lshr_b64 v[49:50], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[54:55], v[29:30], 16 +; SI-NEXT: v_lshr_b64 v[39:40], v[27:28], 16 +; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v16 +; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v14 +; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v12 +; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v8 +; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v4 +; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v2 ; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: v_alignbit_b32 v46, v30, v29, 16 +; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v18 +; SI-NEXT: s_waitcnt expcnt(5) +; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v20 ; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_alignbit_b32 v56, v28, v27, 16 -; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12 -; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v10 -; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8 -; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v6 -; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v2 -; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v18 -; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v20 -; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v22 +; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v22 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v24 +; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v24 ; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v26 +; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v26 ; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v30 +; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v30 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v60, 16, 
v28 +; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v28 ; SI-NEXT: .LBB49_3: ; %end -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v56 +; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v39 ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27 -; SI-NEXT: v_or_b32_e32 v27, v27, v56 +; SI-NEXT: v_or_b32_e32 v27, v27, v39 ; SI-NEXT: buffer_store_dword v27, v0, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v60 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v63 ; SI-NEXT: v_or_b32_e32 v27, v27, v28 ; SI-NEXT: v_add_i32_e32 v28, vcc, 4, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v27, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v46 -; SI-NEXT: v_or_b32_e32 v27, v27, v28 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v54 +; SI-NEXT: v_and_b32_e32 v28, 0xffff, v29 +; SI-NEXT: v_or_b32_e32 v27, v28, v27 ; SI-NEXT: v_add_i32_e32 v28, vcc, 8, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v27, 0xffff, v30 -; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v59 +; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v62 ; SI-NEXT: v_or_b32_e32 v27, v27, v28 ; SI-NEXT: v_add_i32_e32 v28, vcc, 12, v0 ; SI-NEXT: buffer_store_dword v27, v28, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v43 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v53 +; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25 ; SI-NEXT: v_or_b32_e32 v25, v25, v27 ; SI-NEXT: v_add_i32_e32 v27, vcc, 16, v0 ; SI-NEXT: buffer_store_dword v25, v27, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v25, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v58 +; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v61 ; SI-NEXT: v_or_b32_e32 v25, v25, v26 ; SI-NEXT: v_add_i32_e32 v26, vcc, 20, v0 ; SI-NEXT: buffer_store_dword v25, v26, s[0:3], 0 offen -; SI-NEXT: v_and_b32_e32 v23, 
0xffff, v23 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v41 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v52 +; SI-NEXT: v_and_b32_e32 v23, 0xffff, v23 ; SI-NEXT: v_or_b32_e32 v23, v23, v25 ; SI-NEXT: v_add_i32_e32 v25, vcc, 24, v0 ; SI-NEXT: buffer_store_dword v23, v25, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v23, 0xffff, v24 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v57 +; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v60 ; SI-NEXT: v_or_b32_e32 v23, v23, v24 ; SI-NEXT: v_add_i32_e32 v24, vcc, 28, v0 ; SI-NEXT: buffer_store_dword v23, v24, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v55 +; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v49 ; SI-NEXT: v_or_b32_e32 v21, v21, v23 ; SI-NEXT: v_add_i32_e32 v23, vcc, 32, v0 ; SI-NEXT: buffer_store_dword v21, v23, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v21, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v47 +; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v59 ; SI-NEXT: v_or_b32_e32 v21, v21, v22 ; SI-NEXT: v_add_i32_e32 v22, vcc, 36, v0 ; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v52 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v48 ; SI-NEXT: v_or_b32_e32 v19, v19, v21 ; SI-NEXT: v_add_i32_e32 v21, vcc, 40, v0 ; SI-NEXT: buffer_store_dword v19, v21, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v19, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v45 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v58 ; SI-NEXT: v_or_b32_e32 v19, v19, v20 ; SI-NEXT: v_add_i32_e32 v20, vcc, 44, v0 ; SI-NEXT: buffer_store_dword v19, v20, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v50 +; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v38 ; SI-NEXT: v_or_b32_e32 v17, v17, v19 ; SI-NEXT: 
v_add_i32_e32 v19, vcc, 48, v0 ; SI-NEXT: buffer_store_dword v17, v19, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v17, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v44 +; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v57 ; SI-NEXT: v_or_b32_e32 v17, v17, v18 ; SI-NEXT: v_add_i32_e32 v18, vcc, 52, v0 ; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v39 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v51 ; SI-NEXT: v_or_b32_e32 v1, v1, v17 ; SI-NEXT: v_add_i32_e32 v17, vcc, 56, v0 ; SI-NEXT: buffer_store_dword v1, v17, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v56 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37470,7 +37549,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v40 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v47 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37482,7 +37561,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v54 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v46 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37494,7 +37573,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 
0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v53 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v45 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37506,7 +37585,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v44 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37518,7 +37597,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v43 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37530,7 +37609,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v42 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0 ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen @@ -37542,56 +37621,59 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a, ; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) ; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v41 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: 
v_add_i32_e32 v0, vcc, 0x74, v0 ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload -; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:36 ; 
4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB49_4: -; SI-NEXT: ; implicit-def: $vgpr56 +; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr63 +; SI-NEXT: ; implicit-def: $vgpr54 +; SI-NEXT: ; implicit-def: $vgpr62 +; SI-NEXT: ; implicit-def: $vgpr53 +; SI-NEXT: ; implicit-def: $vgpr61 +; SI-NEXT: ; implicit-def: $vgpr52 ; SI-NEXT: ; implicit-def: $vgpr60 -; SI-NEXT: ; implicit-def: $vgpr46 +; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr59 -; SI-NEXT: ; implicit-def: $vgpr43 +; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr58 -; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: ; implicit-def: $vgpr57 -; SI-NEXT: ; implicit-def: $vgpr55 +; SI-NEXT: ; implicit-def: $vgpr56 ; SI-NEXT: ; implicit-def: $vgpr47 -; SI-NEXT: ; implicit-def: $vgpr52 +; SI-NEXT: ; implicit-def: $vgpr46 ; SI-NEXT: ; implicit-def: $vgpr45 -; SI-NEXT: ; implicit-def: $vgpr50 ; SI-NEXT: ; implicit-def: $vgpr44 -; SI-NEXT: ; implicit-def: $vgpr39 +; SI-NEXT: ; implicit-def: $vgpr43 ; SI-NEXT: ; implicit-def: $vgpr42 +; SI-NEXT: ; implicit-def: $vgpr41 +; SI-NEXT: ; implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr37 -; SI-NEXT: ; implicit-def: $vgpr40 ; SI-NEXT: ; implicit-def: $vgpr36 -; SI-NEXT: ; implicit-def: $vgpr54 ; SI-NEXT: ; implicit-def: $vgpr35 -; SI-NEXT: ; implicit-def: $vgpr53 ; SI-NEXT: ; implicit-def: $vgpr34 -; SI-NEXT: ; 
implicit-def: $vgpr51 ; SI-NEXT: ; implicit-def: $vgpr33 -; SI-NEXT: ; implicit-def: $vgpr49 ; SI-NEXT: ; implicit-def: $vgpr32 -; SI-NEXT: ; implicit-def: $vgpr48 ; SI-NEXT: ; implicit-def: $vgpr31 -; SI-NEXT: ; implicit-def: $vgpr38 ; SI-NEXT: s_branch .LBB49_2 ; ; VI-LABEL: bitcast_v15f64_to_v60i16_scalar: @@ -51097,566 +51179,651 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i ; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:76 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill ; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill -; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:64 -; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 -; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:4 -; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:8 -; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:12 -; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:16 -; SI-NEXT: s_waitcnt expcnt(6) -; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:20 -; SI-NEXT: s_waitcnt expcnt(5) -; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:24 -; SI-NEXT: s_waitcnt expcnt(4) -; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12 +; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:16 +; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:28 +; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:32 +; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:44 +; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:48 +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:60 +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:4 +; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:8 +; SI-NEXT: 
buffer_load_dword v55, off, s[0:3], s32 offset:20 +; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:24 +; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36 +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:40 +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:52 +; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:56 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: s_waitcnt expcnt(3) -; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:32 -; SI-NEXT: s_waitcnt expcnt(2) -; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:36 -; SI-NEXT: s_waitcnt expcnt(1) -; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:40 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v30 +; SI-NEXT: v_cvt_f16_f32_e32 v30, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v24 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v18 +; SI-NEXT: v_cvt_f16_f32_e32 v54, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v22 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v36, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v26 +; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:44 -; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:48 -; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:52 -; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:56 -; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:60 -; SI-NEXT: v_cvt_f16_f32_e32 v51, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v56, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v48, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v6 -; SI-NEXT: 
buffer_store_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f16_f32_e32 v42, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v62, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v23, s19 +; SI-NEXT: v_cvt_f16_f32_e32 v19, s23 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v1, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v38, v10 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v14 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v19 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v20, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v24 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v25 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v29, s22 -; SI-NEXT: v_cvt_f16_f32_e32 v30, s23 -; SI-NEXT: v_cvt_f16_f32_e32 v26, s26 -; SI-NEXT: v_cvt_f16_f32_e32 v27, s27 -; SI-NEXT: v_cvt_f16_f32_e32 v28, s28 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v42, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v26, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v40, v8 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v22, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v47, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v18, v13 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 
v2, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v52, v16 +; SI-NEXT: v_cvt_f16_f32_e32 v16, v17 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v21 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v25 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v28 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v29 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f16_f32_e32 v58, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v28, s26 ; SI-NEXT: s_waitcnt vmcnt(14) -; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v23 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v43 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v44 -; SI-NEXT: v_cvt_f16_f32_e32 v9, v45 +; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v33 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v34 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v35 +; SI-NEXT: v_cvt_f16_f32_e32 v59, v38 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v39 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v48, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v39, v50 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v49 +; SI-NEXT: v_cvt_f16_f32_e32 v34, s24 +; SI-NEXT: v_cvt_f16_f32_e32 v49, s27 +; SI-NEXT: v_cvt_f16_f32_e32 v32, s28 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(1) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v50, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v57, v53 +; SI-NEXT: s_waitcnt vmcnt(14) +; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v41 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v43 +; 
SI-NEXT: v_cvt_f16_f32_e32 v4, v44 ; SI-NEXT: s_waitcnt vmcnt(13) -; SI-NEXT: v_cvt_f16_f32_e32 v10, v46 -; SI-NEXT: s_waitcnt vmcnt(12) -; SI-NEXT: v_cvt_f16_f32_e32 v15, v47 -; SI-NEXT: s_waitcnt vmcnt(11) -; SI-NEXT: v_cvt_f16_f32_e32 v23, v57 -; SI-NEXT: s_waitcnt vmcnt(10) -; SI-NEXT: v_cvt_f16_f32_e32 v6, v58 -; SI-NEXT: s_waitcnt vmcnt(9) -; SI-NEXT: v_cvt_f16_f32_e32 v7, v59 -; SI-NEXT: s_waitcnt vmcnt(8) -; SI-NEXT: v_cvt_f16_f32_e32 v14, v60 -; SI-NEXT: s_waitcnt vmcnt(7) -; SI-NEXT: v_cvt_f16_f32_e32 v19, v61 -; SI-NEXT: s_waitcnt vmcnt(6) -; SI-NEXT: v_cvt_f16_f32_e32 v3, v62 -; SI-NEXT: s_waitcnt vmcnt(4) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v63 -; SI-NEXT: s_waitcnt vmcnt(3) -; SI-NEXT: v_cvt_f16_f32_e32 v36, v39 -; SI-NEXT: s_waitcnt vmcnt(2) -; SI-NEXT: v_cvt_f16_f32_e32 v62, v50 -; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v2, v53 -; SI-NEXT: v_cvt_f16_f32_e32 v47, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v60, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v44, s18 -; SI-NEXT: v_cvt_f16_f32_e32 v45, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v46, s20 -; SI-NEXT: v_cvt_f16_f32_e32 v59, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v43, s24 -; SI-NEXT: v_cvt_f16_f32_e32 v58, s25 -; SI-NEXT: v_cvt_f16_f32_e32 v57, s29 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v45 +; SI-NEXT: v_cvt_f16_f32_e32 v35, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v38, s21 +; SI-NEXT: v_cvt_f16_f32_e32 v33, s22 +; SI-NEXT: v_cvt_f16_f32_e32 v44, s29 ; SI-NEXT: s_and_b64 s[4:5], vcc, exec -; SI-NEXT: s_cbranch_scc0 .LBB59_2 +; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v46 +; SI-NEXT: v_cvt_f16_f32_e32 v46, s25 +; SI-NEXT: s_cbranch_scc0 .LBB59_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: 
s_mov_b64 s[4:5], 0 -; SI-NEXT: s_branch .LBB59_3 -; SI-NEXT: .LBB59_2: -; SI-NEXT: s_mov_b64 s[4:5], -1 -; SI-NEXT: .LBB59_3: ; %Flow -; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5] -; SI-NEXT: v_mov_b32_e32 v61, v14 -; SI-NEXT: v_mov_b32_e32 v63, v15 -; SI-NEXT: v_mov_b32_e32 v15, v18 -; SI-NEXT: v_mov_b32_e32 v18, v22 -; SI-NEXT: v_mov_b32_e32 v22, v33 -; SI-NEXT: v_mov_b32_e32 v33, v11 -; SI-NEXT: v_mov_b32_e32 v11, v8 -; SI-NEXT: v_mov_b32_e32 v8, v5 -; SI-NEXT: v_mov_b32_e32 v5, v42 -; SI-NEXT: v_mov_b32_e32 v42, v1 -; SI-NEXT: s_cbranch_vccnz .LBB59_5 -; SI-NEXT: ; %bb.4: ; %cmp.true -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: s_cbranch_execnz .LBB59_3 +; SI-NEXT: .LBB59_2: ; %cmp.true +; SI-NEXT: v_cvt_f32_f16_e32 v5, v23 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v35 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v58 +; SI-NEXT: v_mov_b32_e32 v43, v34 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v23, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v23 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v35, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v19 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v33 +; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v19, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v19 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_or_b32_e32 v33, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v49 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v28 +; SI-NEXT: v_mov_b32_e32 v41, v32 +; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v49, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: 
v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v46 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v49 +; SI-NEXT: v_mov_b32_e32 v53, v27 +; SI-NEXT: v_or_b32_e32 v28, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v30 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_mov_b32_e32 v38, v55 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v44 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v62 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v42 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 -; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NEXT: v_cvt_f32_f16_e32 v17, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v40 +; SI-NEXT: v_mov_b32_e32 v40, v54 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v47 +; SI-NEXT: v_lshr_b64 v[46:47], v[27:28], 16 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v52 +; SI-NEXT: v_mov_b32_e32 v52, v15 +; SI-NEXT: v_add_f32_e32 v1, 
0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v30, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v26 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 -; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7 -; SI-NEXT: v_cvt_f32_f16_e32 v16, v16 -; SI-NEXT: v_cvt_f32_f16_e32 v21, v21 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6 -; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10 -; SI-NEXT: v_cvt_f32_f16_e32 v20, v20 -; SI-NEXT: v_cvt_f32_f16_e32 v32, v32 -; SI-NEXT: v_or_b32_e32 v62, v1, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9 -; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13 -; SI-NEXT: v_cvt_f32_f16_e32 v31, v31 -; SI-NEXT: v_cvt_f32_f16_e32 v35, v35 -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12 -; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17 -; SI-NEXT: v_cvt_f32_f16_e32 v34, v34 -; SI-NEXT: v_cvt_f32_f16_e32 v38, v38 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v4 -; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NEXT: v_cvt_f16_f32_e32 v17, v17 -; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16 -; SI-NEXT: v_add_f32_e32 v21, 
0x38000000, v21 -; SI-NEXT: v_cvt_f32_f16_e32 v37, v37 -; SI-NEXT: v_or_b32_e32 v3, v3, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v7 -; SI-NEXT: v_cvt_f16_f32_e32 v16, v16 -; SI-NEXT: v_cvt_f16_f32_e32 v21, v21 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v26, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v22 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v22, v3, v5 +; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v3, v18 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill +; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; SI-NEXT: v_or_b32_e32 v18, v3, 
v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v37 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v16 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_mov_b32_e32 v51, v11 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v55, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v55 +; SI-NEXT: v_or_b32_e32 v16, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v63 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v14 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v37, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37 +; SI-NEXT: v_or_b32_e32 v14, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v61 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v12 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v63, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v63 +; SI-NEXT: v_or_b32_e32 v12, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v60 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v10 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v61, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v61 +; SI-NEXT: v_or_b32_e32 v10, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v57 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v7 +; SI-NEXT: v_or_b32_e32 v57, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v59 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_lshr_b64 v[58:59], v[34:35], 16 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v50 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; 
SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v8 +; SI-NEXT: v_mov_b32_e32 v8, v48 +; SI-NEXT: v_cvt_f16_f32_e32 v48, v5 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v48 +; SI-NEXT: v_or_b32_e32 v6, v3, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v4 +; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_cvt_f32_f16_e32 v1, v44 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v31 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v60, v4 +; SI-NEXT: v_mov_b32_e32 v59, v48 +; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v38 +; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v60 +; SI-NEXT: v_or_b32_e32 v4, v3, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v38, v43 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshr_b64 v[47:48], v[17:18], 16 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshr_b64 v[44:45], v[29:30], 16 +; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v24 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v39 +; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v31, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v31 +; SI-NEXT: v_or_b32_e32 v2, v2, v20 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v62 +; 
SI-NEXT: v_or_b32_e32 v62, v24, v32 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 ; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 -; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32 -; SI-NEXT: v_or_b32_e32 v6, v6, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v10 ; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 -; SI-NEXT: v_cvt_f16_f32_e32 v32, v32 -; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31 -; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35 -; SI-NEXT: v_or_b32_e32 v9, v9, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v13 -; SI-NEXT: v_cvt_f16_f32_e32 v31, v31 -; SI-NEXT: v_cvt_f16_f32_e32 v35, v35 -; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v42, v24, v27 +; SI-NEXT: v_mov_b32_e32 v48, v62 +; SI-NEXT: v_or_b32_e32 v50, v20, v34 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v41 +; SI-NEXT: v_mov_b32_e32 v34, v42 +; SI-NEXT: v_lshr_b64 v[42:43], v[25:26], 16 +; SI-NEXT: v_mov_b32_e32 v62, v50 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_or_b32_e32 v41, v20, v29 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v38, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload ; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 -; SI-NEXT: v_or_b32_e32 v12, v12, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v17 -; SI-NEXT: v_cvt_f16_f32_e32 v34, v34 ; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 -; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v37 -; SI-NEXT: v_or_b32_e32 v16, v16, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v21 -; SI-NEXT: v_cvt_f16_f32_e32 v37, v37 -; SI-NEXT: v_or_b32_e32 v20, v20, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v32 -; SI-NEXT: v_or_b32_e32 v31, v31, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v35 -; SI-NEXT: v_or_b32_e32 v34, v34, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v38 -; SI-NEXT: v_or_b32_e32 v37, v37, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v48, v48 -; 
SI-NEXT: v_cvt_f32_f16_e32 v50, v51 -; SI-NEXT: v_cvt_f32_f16_e32 v27, v27 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v49, v39 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v48 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v26, v26 -; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v49 -; SI-NEXT: v_cvt_f32_f16_e32 v30, v30 -; SI-NEXT: v_or_b32_e32 v48, v39, v48 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v52 -; SI-NEXT: v_cvt_f32_f16_e32 v29, v29 -; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27 -; SI-NEXT: v_cvt_f16_f32_e32 v27, v27 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v52, v39 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26 -; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30 -; SI-NEXT: v_cvt_f16_f32_e32 v26, v26 -; SI-NEXT: v_cvt_f16_f32_e32 v30, v30 -; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29 -; SI-NEXT: v_cvt_f16_f32_e32 v29, v29 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v52 -; SI-NEXT: v_or_b32_e32 v51, v39, v50 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v27 -; SI-NEXT: v_or_b32_e32 v26, v26, v39 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v30 -; SI-NEXT: v_or_b32_e32 v29, v29, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v45 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v44 -; SI-NEXT: v_cvt_f32_f16_e32 v53, v59 -; SI-NEXT: v_cvt_f32_f16_e32 v56, v56 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 -; SI-NEXT: v_cvt_f16_f32_e32 v45, v39 -; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v53, 0x38000000, v53 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v45 -; SI-NEXT: v_cvt_f16_f32_e32 v53, v53 -; SI-NEXT: v_or_b32_e32 v44, v39, v50 -; SI-NEXT: v_cvt_f32_f16_e32 v39, v60 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v47 -; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v53 -; SI-NEXT: v_add_f32_e32 v56, 0x38000000, v56 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v24, v8 +; SI-NEXT: 
v_or_b32_e32 v8, v38, v25 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v20, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v38, v8 +; SI-NEXT: v_or_b32_e32 v8, v24, v21 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v8, v20, v17 +; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill +; SI-NEXT: v_cvt_f32_f16_e32 v24, v40 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v36 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v8, v38, v15 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: v_cvt_f32_f16_e32 v38, v53 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 +; SI-NEXT: v_or_b32_e32 v54, v24, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v24, v52 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_or_b32_e32 v52, v38, v9 +; SI-NEXT: v_mov_b32_e32 v27, v52 +; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 +; SI-NEXT: v_or_b32_e32 v36, v20, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v20, v51 +; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 +; SI-NEXT: v_lshr_b64 v[52:53], v[15:16], 16 +; 
SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20 +; SI-NEXT: v_cvt_f16_f32_e32 v20, v20 +; SI-NEXT: v_or_b32_e32 v51, v24, v56 +; SI-NEXT: v_mov_b32_e32 v15, v51 +; SI-NEXT: v_lshr_b64 v[50:51], v[13:14], 16 +; SI-NEXT: v_or_b32_e32 v24, v20, v5 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v38, v8 +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v38 +; SI-NEXT: v_cvt_f16_f32_e32 v38, v38 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cvt_f32_f16_e32 v39, v8 +; SI-NEXT: v_or_b32_e32 v8, v38, v3 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill ; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v39 ; SI-NEXT: v_cvt_f16_f32_e32 v39, v39 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v56, v56 -; SI-NEXT: v_lshlrev_b32_e32 v39, 16, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v23, v23 -; SI-NEXT: v_or_b32_e32 v47, v50, v39 -; SI-NEXT: v_cvt_f32_f16_e32 v50, v46 -; SI-NEXT: v_cvt_f32_f16_e32 v46, v58 -; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v56 -; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23 -; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50 -; SI-NEXT: v_add_f32_e32 v46, 0x38000000, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v50, v50 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v46 -; SI-NEXT: v_cvt_f16_f32_e32 v23, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v57, v57 -; SI-NEXT: v_or_b32_e32 v46, v50, v53 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v58 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_or_b32_e32 v8, v39, v1 +; SI-NEXT: v_lshr_b64 v[38:39], v[32:33], 16 +; SI-NEXT: v_mov_b32_e32 v32, v41 +; SI-NEXT: v_lshr_b64 v[40:41], v[21:22], 16 +; SI-NEXT: v_lshr_b64 v[20:21], v[11:12], 16 +; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:184 ; 4-byte Folded 
Spill +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[20:21], v[56:57], 16 +; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill +; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill +; SI-NEXT: v_mov_b32_e32 v11, v24 +; SI-NEXT: v_lshr_b64 v[8:9], v[9:10], 16 +; SI-NEXT: v_mov_b32_e32 v39, v31 +; SI-NEXT: v_mov_b32_e32 v31, v60 +; SI-NEXT: v_mov_b32_e32 v60, v61 +; SI-NEXT: v_mov_b32_e32 v61, v63 +; SI-NEXT: v_mov_b32_e32 v63, v37 +; SI-NEXT: v_mov_b32_e32 v37, v55 +; SI-NEXT: v_lshr_b64 v[55:56], v[5:6], 16 +; SI-NEXT: v_lshr_b64 v[24:25], v[3:4], 16 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshr_b64 v[20:21], v[1:2], 16 +; SI-NEXT: .LBB59_3: ; %end +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v58 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v62 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v35 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v23 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 4, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v38 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v48 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 8, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v33 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 12, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v46 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v34 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 16, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v28 +; SI-NEXT: 
v_lshlrev_b32_e32 v3, 16, v49 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v44 +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v32 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v30 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v42 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v26 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v40 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; SI-NEXT: v_or_b32_e32 v1, v3, v1 +; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: 
v_and_b32_e32 v1, 0xffff, v22 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v18 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v52 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v16 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v54 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v50 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v14 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v63 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: 
v_add_i32_e32 v3, vcc, 0x44, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v36 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v58, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v42, v42 -; SI-NEXT: v_cvt_f32_f16_e32 v41, v41 -; SI-NEXT: v_cvt_f32_f16_e32 v40, v40 -; SI-NEXT: v_add_f32_e32 v58, 0x38000000, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v58 -; SI-NEXT: v_cvt_f32_f16_e32 v55, v55 -; SI-NEXT: v_cvt_f32_f16_e32 v54, v54 -; SI-NEXT: v_cvt_f32_f16_e32 v25, v25 -; SI-NEXT: v_or_b32_e32 v14, v58, v56 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v63 -; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill -; SI-NEXT: v_cvt_f32_f16_e32 v24, v24 -; SI-NEXT: v_cvt_f32_f16_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v58, 0x38000000, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v58 -; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v13 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_cvt_f32_f16_e32 v14, v36 -; SI-NEXT: v_cvt_f32_f16_e32 v43, v43 -; SI-NEXT: v_cvt_f32_f16_e32 v28, v28 -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NEXT: v_cvt_f32_f16_e32 v33, v33 -; SI-NEXT: v_cvt_f32_f16_e32 v22, v22 -; SI-NEXT: v_cvt_f32_f16_e32 v18, v18 -; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NEXT: v_or_b32_e32 v63, v58, v23 -; SI-NEXT: v_cvt_f32_f16_e32 v58, v61 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v12 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x4c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: 
s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v27 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v8 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v10 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload +; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v15 ; SI-NEXT: s_waitcnt vmcnt(1) -; SI-NEXT: v_cvt_f32_f16_e32 v36, v1 -; SI-NEXT: v_add_f32_e32 v57, 0x38000000, v57 -; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v42 -; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41 -; SI-NEXT: v_add_f32_e32 v40, 0x38000000, v40 -; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v55 -; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v54 -; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25 -; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24 -; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19 -; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14 -; SI-NEXT: v_add_f32_e32 v43, 0x38000000, v43 -; SI-NEXT: v_cvt_f16_f32_e32 v57, v57 -; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v42, v42 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v41, v41 -; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v40, v40 -; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v55, v55 -; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v54, v54 -; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v25, v25 -; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v24, v24 -; SI-NEXT: v_add_f32_e32 v15, 0x38000000, 
v15 -; SI-NEXT: v_cvt_f16_f32_e32 v19, v19 -; SI-NEXT: v_add_f32_e32 v58, 0x38000000, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36 -; SI-NEXT: v_cvt_f16_f32_e32 v43, v43 -; SI-NEXT: v_cvt_f16_f32_e32 v28, v28 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NEXT: v_cvt_f16_f32_e32 v33, v33 -; SI-NEXT: v_cvt_f16_f32_e32 v22, v22 -; SI-NEXT: v_cvt_f16_f32_e32 v18, v18 -; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NEXT: v_cvt_f16_f32_e32 v58, v58 -; SI-NEXT: v_cvt_f16_f32_e32 v36, v36 -; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v57 -; SI-NEXT: v_lshlrev_b32_e32 v42, 16, v42 -; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v41 -; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40 -; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v55 -; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v54 -; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25 -; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; SI-NEXT: v_or_b32_e32 v43, v43, v50 -; SI-NEXT: v_or_b32_e32 v28, v28, v57 -; SI-NEXT: v_or_b32_e32 v5, v5, v42 -; SI-NEXT: v_or_b32_e32 v8, v8, v41 -; SI-NEXT: v_or_b32_e32 v11, v11, v40 -; SI-NEXT: v_or_b32_e32 v33, v33, v55 -; SI-NEXT: v_or_b32_e32 v22, v22, v54 -; SI-NEXT: v_or_b32_e32 v18, v18, v25 -; SI-NEXT: v_or_b32_e32 v15, v15, v24 -; SI-NEXT: v_or_b32_e32 v61, v58, v19 -; SI-NEXT: v_or_b32_e32 v1, v36, v14 -; SI-NEXT: v_alignbit_b32 v60, v44, v39, 16 -; SI-NEXT: v_alignbit_b32 v59, v29, v53, 16 -; SI-NEXT: v_alignbit_b32 v58, v26, v50, 16 -; SI-NEXT: v_alignbit_b32 v57, v51, v57, 16 -; SI-NEXT: v_alignbit_b32 v56, v48, v56, 16 -; SI-NEXT: v_alignbit_b32 v42, v37, v42, 16 -; SI-NEXT: v_alignbit_b32 v41, v34, v41, 16 -; SI-NEXT: v_alignbit_b32 v40, v31, v40, 16 -; SI-NEXT: v_alignbit_b32 v55, v20, v55, 16 -; SI-NEXT: v_alignbit_b32 v54, v16, v54, 16 -; SI-NEXT: v_alignbit_b32 v25, v12, v25, 16 -; SI-NEXT: v_alignbit_b32 v24, v9, v24, 16 -; 
SI-NEXT: v_alignbit_b32 v23, v6, v23, 16 -; SI-NEXT: v_alignbit_b32 v19, v3, v19, 16 -; SI-NEXT: v_alignbit_b32 v36, v62, v14, 16 -; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill -; SI-NEXT: .LBB59_5: ; %end -; SI-NEXT: v_and_b32_e32 v39, 0xffff, v47 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v60 -; SI-NEXT: v_or_b32_e32 v39, v39, v50 -; SI-NEXT: buffer_store_dword v39, v0, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v39, 0xffff, v44 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v45 -; SI-NEXT: v_or_b32_e32 v39, v39, v50 -; SI-NEXT: v_add_i32_e32 v50, vcc, 4, v0 -; SI-NEXT: buffer_store_dword v39, v50, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v39, 0xffff, v46 -; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v59 -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; SI-NEXT: v_or_b32_e32 v39, v39, v50 -; SI-NEXT: v_add_i32_e32 v50, vcc, 8, v0 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0 -; SI-NEXT: buffer_store_dword v39, v50, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v29, 0xffff, v43 -; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v58 -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v26 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27 -; SI-NEXT: v_or_b32_e32 v29, v29, v30 -; SI-NEXT: v_add_i32_e32 v30, vcc, 16, v0 -; SI-NEXT: v_or_b32_e32 v26, v26, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 20, v0 -; SI-NEXT: buffer_store_dword v29, v30, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v28 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v57 -; SI-NEXT: v_or_b32_e32 v26, v26, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 24, v0 -; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v51 -; SI-NEXT: 
v_lshlrev_b32_e32 v27, 16, v52 -; SI-NEXT: v_or_b32_e32 v26, v26, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 28, v0 -; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v56 -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5 -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3 -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x6c, v0 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v14 -; SI-NEXT: v_or_b32_e32 v26, v26, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 32, v0 -; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v26, 0xffff, v48 -; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v49 -; SI-NEXT: v_or_b32_e32 v26, v26, v27 -; SI-NEXT: v_add_i32_e32 v27, vcc, 36, v0 -; SI-NEXT: buffer_store_dword v26, v27, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v42 -; SI-NEXT: v_or_b32_e32 v5, v5, v26 -; SI-NEXT: v_add_i32_e32 v26, vcc, 40, v0 -; SI-NEXT: buffer_store_dword v5, v26, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v37 -; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v38 -; SI-NEXT: v_or_b32_e32 v5, v5, v26 -; SI-NEXT: v_add_i32_e32 v26, vcc, 44, v0 -; SI-NEXT: buffer_store_dword v5, v26, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v8 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v41 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 48, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v34 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v35 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 52, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: 
s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v11 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v40 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 56, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v31 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v32 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 60, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v33 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v55 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 64, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v20 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v21 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x44, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v22 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v54 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x48, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v16 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v17 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x4c, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v18 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v25 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x50, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v12 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v13 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x54, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; 
SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v15 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v24 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x58, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v9 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v10 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x5c, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v63 -; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v23 -; SI-NEXT: v_or_b32_e32 v5, v5, v8 -; SI-NEXT: v_add_i32_e32 v8, vcc, 0x60, v0 -; SI-NEXT: buffer_store_dword v5, v8, s[0:3], 0 offen -; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v6 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v7 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x64, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v8 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen ; SI-NEXT: s_waitcnt expcnt(0) -; SI-NEXT: v_and_b32_e32 v5, 0xffff, v61 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v19 -; SI-NEXT: v_or_b32_e32 v5, v5, v6 -; SI-NEXT: v_add_i32_e32 v6, vcc, 0x68, v0 -; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen -; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload -; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v36 -; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) -; SI-NEXT: v_and_b32_e32 v3, 0xffff, v1 -; SI-NEXT: v_and_b32_e32 v1, 0xffff, v62 -; SI-NEXT: v_or_b32_e32 v3, v3, v4 -; SI-NEXT: v_add_i32_e32 v4, vcc, 0x70, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v57 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v7 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0 +; SI-NEXT: 
buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v11 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v55 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v6 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v59 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v24 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v4 +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v31 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x6c, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload +; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v20 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; SI-NEXT: v_or_b32_e32 v1, v1, v3 +; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0 +; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v39 ; SI-NEXT: v_or_b32_e32 v1, v1, v2 ; SI-NEXT: v_add_i32_e32 v0, vcc, 0x74, v0 -; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen ; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen ; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload ; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload @@ 
-51676,6 +51843,8 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i ; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) ; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: .LBB59_4: +; SI-NEXT: s_branch .LBB59_2 ; ; VI-LABEL: bitcast_v60f16_to_v60i16_scalar: ; VI: ; %bb.0: diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll index 4aded5da3668a..685e2fbdecfad 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll @@ -632,51 +632,53 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: s_cbranch_scc0 .LBB5_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s4, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s4, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s19, s17, 24 +; SI-NEXT: s_lshr_b32 s22, s17, 16 +; SI-NEXT: s_lshr_b32 s23, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB5_3 ; SI-NEXT: .LBB5_2: ; %cmp.true -; SI-NEXT: s_add_i32 s16, s16, 3 -; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: v_mov_b32_e32 
v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s4, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s4, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_add_i32 s18, s18, 3 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_lshr_b32 s19, s17, 24 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[12:13], s[16:17], 8 +; SI-NEXT: s_lshr_b32 s22, s17, 16 +; SI-NEXT: s_lshr_b32 s23, s17, 8 +; SI-NEXT: s_lshr_b64 s[6:7], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 8 ; SI-NEXT: .LBB5_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s12 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_mov_b32_e32 v3, s4 ; SI-NEXT: v_mov_b32_e32 v4, s17 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v5, s23 +; SI-NEXT: v_mov_b32_e32 v6, s22 +; SI-NEXT: v_mov_b32_e32 v7, s19 ; SI-NEXT: v_mov_b32_e32 v8, s18 +; SI-NEXT: v_mov_b32_e32 v9, s14 +; SI-NEXT: v_mov_b32_e32 v10, s10 +; SI-NEXT: v_mov_b32_e32 v11, s6 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB5_4: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 +; SI-NEXT: ; implicit-def: $sgpr12 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr19 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 ; SI-NEXT: s_branch .LBB5_2 ; ; VI-LABEL: bitcast_v3i32_to_v12i8_scalar: @@ -3133,31 +3135,29 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg 
%a, i32 in ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: s_cbranch_scc0 .LBB17_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB17_3 ; SI-NEXT: .LBB17_2: ; %cmp.true -; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_add_i32 s18, s18, 3 ; SI-NEXT: s_add_i32 s17, s17, 3 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s4, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_add_i32 s16, s16, 3 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 ; SI-NEXT: .LBB17_3: ; %end ; SI-NEXT: v_mov_b32_e32 v0, s16 +; SI-NEXT: v_mov_b32_e32 v1, s6 ; SI-NEXT: v_mov_b32_e32 v2, s17 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v3, s10 ; SI-NEXT: v_mov_b32_e32 v4, s18 +; SI-NEXT: v_mov_b32_e32 v5, s4 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB17_4: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB17_2 ; ; VI-LABEL: bitcast_v3i32_to_v6i16_scalar: @@ -3762,50 +3762,59 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: s_cbranch_scc0 .LBB21_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v11, s4, v0, 24 -; SI-NEXT: v_alignbit_b32 v10, s4, v0, 16 -; SI-NEXT: v_alignbit_b32 v9, s4, v0, 8 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v3, s17, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s17, v0, 16 -; SI-NEXT: 
v_alignbit_b32 v1, s17, v0, 8 -; SI-NEXT: s_lshr_b32 s6, s17, 24 -; SI-NEXT: s_lshr_b32 s7, s17, 16 -; SI-NEXT: s_lshr_b32 s8, s17, 8 +; SI-NEXT: s_lshr_b32 s19, s17, 24 +; SI-NEXT: s_lshr_b32 s22, s17, 16 +; SI-NEXT: s_lshr_b32 s23, s17, 8 +; SI-NEXT: s_lshr_b64 s[10:11], s[18:19], 24 +; SI-NEXT: s_lshr_b64 s[12:13], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[14:15], s[18:19], 8 +; SI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 +; SI-NEXT: s_lshr_b64 s[8:9], s[16:17], 8 ; SI-NEXT: s_cbranch_execnz .LBB21_4 ; SI-NEXT: .LBB21_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v8, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v4, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_alignbit_b32 v11, s4, v8, 24 -; SI-NEXT: v_alignbit_b32 v10, s4, v8, 16 -; SI-NEXT: v_alignbit_b32 v9, s4, v8, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v4 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v4 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v17, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v16, s16, 1.0 +; SI-NEXT: v_lshr_b64 v[11:12], v[8:9], 24 +; SI-NEXT: v_lshr_b64 v[3:4], v[16:17], 24 +; SI-NEXT: v_lshr_b64 v[14:15], v[16:17], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[16:17], 8 +; SI-NEXT: v_lshr_b64 v[12:13], v[8:9], 16 +; SI-NEXT: v_lshr_b64 v[9:10], v[8:9], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v17 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v17 +; SI-NEXT: s_branch .LBB21_5 ; SI-NEXT: .LBB21_3: -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr9 -; SI-NEXT: ; implicit-def: $vgpr10 -; SI-NEXT: ; implicit-def: $vgpr11 +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: 
$sgpr23 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr19 +; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr12 +; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: s_branch .LBB21_2 ; SI-NEXT: .LBB21_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v4, s17 +; SI-NEXT: v_mov_b32_e32 v16, s16 +; SI-NEXT: v_mov_b32_e32 v17, s17 ; SI-NEXT: v_mov_b32_e32 v8, s18 -; SI-NEXT: v_mov_b32_e32 v5, s8 -; SI-NEXT: v_mov_b32_e32 v6, s7 -; SI-NEXT: v_mov_b32_e32 v7, s6 +; SI-NEXT: v_mov_b32_e32 v5, s23 +; SI-NEXT: v_mov_b32_e32 v6, s22 +; SI-NEXT: v_mov_b32_e32 v7, s19 +; SI-NEXT: v_mov_b32_e32 v11, s10 +; SI-NEXT: v_mov_b32_e32 v12, s12 +; SI-NEXT: v_mov_b32_e32 v9, s14 +; SI-NEXT: v_mov_b32_e32 v3, s4 +; SI-NEXT: v_mov_b32_e32 v14, s6 +; SI-NEXT: v_mov_b32_e32 v1, s8 +; SI-NEXT: .LBB21_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v16 +; SI-NEXT: v_mov_b32_e32 v2, v14 +; SI-NEXT: v_mov_b32_e32 v4, v17 +; SI-NEXT: v_mov_b32_e32 v10, v12 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v3f32_to_v12i8_scalar: @@ -6282,30 +6291,33 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32 ; SI-NEXT: s_cmp_lg_u32 s19, 0 ; SI-NEXT: s_cbranch_scc0 .LBB33_3 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_mov_b32_e32 v0, s18 -; SI-NEXT: v_alignbit_b32 v5, s4, v0, 16 -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_alignbit_b32 v1, s17, v0, 16 -; SI-NEXT: s_lshr_b32 s6, s17, 16 +; SI-NEXT: s_lshr_b32 s10, s17, 16 +; SI-NEXT: s_lshr_b64 s[4:5], s[18:19], 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[16:17], 16 ; SI-NEXT: s_cbranch_execnz .LBB33_4 ; SI-NEXT: .LBB33_2: ; %cmp.true ; SI-NEXT: v_add_f32_e64 v4, s18, 1.0 -; SI-NEXT: v_add_f32_e64 v2, s17, 1.0 -; SI-NEXT: v_add_f32_e64 v0, s16, 1.0 -; SI-NEXT: v_alignbit_b32 v1, v2, v0, 16 -; SI-NEXT: v_alignbit_b32 v5, s4, v4, 16 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2 -; SI-NEXT: s_setpc_b64 s[30:31] +; SI-NEXT: v_add_f32_e64 v8, s17, 1.0 +; SI-NEXT: v_add_f32_e64 v7, s16, 1.0 +; 
SI-NEXT: v_lshr_b64 v[1:2], v[7:8], 16 +; SI-NEXT: v_lshr_b64 v[5:6], v[4:5], 16 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v8 +; SI-NEXT: s_branch .LBB33_5 ; SI-NEXT: .LBB33_3: -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr5 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: s_branch .LBB33_2 ; SI-NEXT: .LBB33_4: -; SI-NEXT: v_mov_b32_e32 v0, s16 -; SI-NEXT: v_mov_b32_e32 v2, s17 +; SI-NEXT: v_mov_b32_e32 v7, s16 +; SI-NEXT: v_mov_b32_e32 v8, s17 ; SI-NEXT: v_mov_b32_e32 v4, s18 -; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v3, s10 +; SI-NEXT: v_mov_b32_e32 v5, s4 +; SI-NEXT: v_mov_b32_e32 v1, s6 +; SI-NEXT: .LBB33_5: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v7 +; SI-NEXT: v_mov_b32_e32 v2, v8 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: bitcast_v3f32_to_v6i16_scalar: @@ -7981,62 +7993,64 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3 ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: s_cmp_lg_u32 s22, 0 -; SI-NEXT: v_mul_f32_e64 v16, 1.0, s17 -; SI-NEXT: v_mul_f32_e64 v17, 1.0, s16 -; SI-NEXT: v_mul_f32_e64 v14, 1.0, s19 -; SI-NEXT: v_mul_f32_e64 v15, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v12, 1.0, s21 -; SI-NEXT: v_mul_f32_e64 v13, 1.0, s20 +; SI-NEXT: v_mul_f32_e64 v17, 1.0, s17 +; SI-NEXT: v_mul_f32_e64 v18, 1.0, s16 +; SI-NEXT: v_mul_f32_e64 v15, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v16, 1.0, s18 +; SI-NEXT: v_mul_f32_e64 v0, 1.0, s21 +; SI-NEXT: v_mul_f32_e64 v14, 1.0, s20 ; SI-NEXT: s_cbranch_scc0 .LBB39_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v16 -; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v14 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v12 -; SI-NEXT: v_alignbit_b32 v0, v0, v17, 16 -; SI-NEXT: v_alignbit_b32 v4, v6, v15, 16 -; SI-NEXT: v_alignbit_b32 v8, v10, v13, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, 
v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v14 -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v12 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v17 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v15 +; SI-NEXT: v_alignbit_b32 v12, v1, v18, 16 +; SI-NEXT: v_alignbit_b32 v13, v6, v16, 16 +; SI-NEXT: v_lshr_b64 v[3:4], v[12:13], 24 +; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v0 +; SI-NEXT: v_lshr_b64 v[4:5], v[12:13], 16 +; SI-NEXT: v_alignbit_b32 v8, v10, v14, 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[12:13], 8 +; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v15 +; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v0 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v13 ; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v8 ; SI-NEXT: s_cbranch_execnz .LBB39_3 ; SI-NEXT: .LBB39_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v16 -; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v17 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v17 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v18 +; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v14 -; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v15 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; SI-NEXT: v_alignbit_b32 v12, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v15 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v16 ; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v7 -; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v12 -; SI-NEXT: v_alignbit_b32 v4, v6, v1, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v13 -; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; SI-NEXT: v_alignbit_b32 v13, v6, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v14 +; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 ; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 -; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v11 +; 
SI-NEXT: v_lshrrev_b32_e32 v10, 16, v0 +; SI-NEXT: v_lshr_b64 v[3:4], v[12:13], 24 ; SI-NEXT: v_alignbit_b32 v8, v10, v1, 16 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_lshr_b64 v[4:5], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[12:13], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v13 ; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v8 ; SI-NEXT: v_lshrrev_b32_e32 v7, 24, v7 -; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v11 +; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v0 ; SI-NEXT: .LBB39_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v12 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v13 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB39_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr7 @@ -9524,69 +9538,71 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i ; SI-LABEL: bitcast_v6f16_to_v12i8_scalar: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_cvt_f16_f32_e32 v15, s17 -; SI-NEXT: v_cvt_f16_f32_e32 v14, s16 +; SI-NEXT: v_cvt_f16_f32_e32 v16, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v15, s16 ; SI-NEXT: v_cvt_f16_f32_e32 v6, s19 -; SI-NEXT: v_cvt_f16_f32_e32 v13, s18 +; SI-NEXT: v_cvt_f16_f32_e32 v14, s18 ; SI-NEXT: v_cvt_f16_f32_e32 v10, s21 -; SI-NEXT: v_cvt_f16_f32_e32 v12, s20 +; SI-NEXT: v_cvt_f16_f32_e32 v0, s20 ; SI-NEXT: s_cmp_lg_u32 s22, 0 ; SI-NEXT: s_cbranch_scc0 .LBB43_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v15 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v16 +; SI-NEXT: v_or_b32_e32 v12, v15, v1 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 -; SI-NEXT: 
v_lshlrev_b32_e32 v7, 16, v10 -; SI-NEXT: v_or_b32_e32 v0, v14, v0 -; SI-NEXT: v_or_b32_e32 v4, v13, v1 -; SI-NEXT: v_or_b32_e32 v8, v12, v7 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_or_b32_e32 v13, v14, v1 +; SI-NEXT: v_lshr_b64 v[1:2], v[12:13], 8 +; SI-NEXT: v_lshr_b64 v[3:4], v[12:13], 24 +; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v10 +; SI-NEXT: v_lshr_b64 v[4:5], v[12:13], 16 +; SI-NEXT: v_or_b32_e32 v8, v0, v2 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v13 ; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v8 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v11, v10, 8, 8 ; SI-NEXT: s_cbranch_execnz .LBB43_3 ; SI-NEXT: .LBB43_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v0, v15 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v14 -; SI-NEXT: v_cvt_f32_f16_e32 v2, v6 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v13 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v16 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v14 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_cvt_f16_f32_e32 v6, v2 -; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_or_b32_e32 v0, v1, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; SI-NEXT: v_or_b32_e32 v12, v2, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v1, v10 -; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v3, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v3 +; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 ; SI-NEXT: v_cvt_f16_f32_e32 v10, v1 -; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v3 -; 
SI-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v6 -; SI-NEXT: v_or_b32_e32 v4, v2, v3 -; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v10 -; SI-NEXT: v_or_b32_e32 v8, v1, v2 -; SI-NEXT: v_alignbit_b32 v3, v4, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, v4, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, v4, v0, 8 -; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v4 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v6 +; SI-NEXT: v_or_b32_e32 v13, v2, v1 +; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v10 +; SI-NEXT: v_lshr_b64 v[3:4], v[12:13], 24 +; SI-NEXT: v_or_b32_e32 v8, v0, v1 +; SI-NEXT: v_lshr_b64 v[4:5], v[12:13], 16 +; SI-NEXT: v_lshr_b64 v[1:2], v[12:13], 8 +; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v13 ; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v8 ; SI-NEXT: v_bfe_u32 v7, v6, 8, 8 ; SI-NEXT: v_bfe_u32 v11, v10, 8, 8 ; SI-NEXT: .LBB43_3: ; %end +; SI-NEXT: v_mov_b32_e32 v0, v12 +; SI-NEXT: v_mov_b32_e32 v2, v4 +; SI-NEXT: v_mov_b32_e32 v4, v13 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB43_4: -; SI-NEXT: ; implicit-def: $vgpr0 +; SI-NEXT: ; implicit-def: $vgpr12 ; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 +; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr5 ; SI-NEXT: ; implicit-def: $vgpr7 ; SI-NEXT: ; implicit-def: $vgpr8 @@ -10274,37 +10290,37 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in ; SI-NEXT: s_cmp_lg_u32 s28, 0 ; SI-NEXT: s_cbranch_scc0 .LBB45_4 ; SI-NEXT: ; %bb.1: ; %cmp.false -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 +; SI-NEXT: s_and_b32 s4, s16, 0xff +; SI-NEXT: s_lshl_b32 s5, s17, 8 ; SI-NEXT: s_or_b32 s4, s4, s5 -; SI-NEXT: s_and_b32 s5, s22, 0xff +; SI-NEXT: s_and_b32 s5, s18, 0xff ; SI-NEXT: s_lshl_b32 s5, s5, 16 -; SI-NEXT: s_lshl_b32 s6, s23, 24 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s6, s5 -; SI-NEXT: 
s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xff -; SI-NEXT: s_lshl_b32 s4, s4, 16 ; SI-NEXT: s_lshl_b32 s6, s19, 24 -; SI-NEXT: s_or_b32 s4, s6, s4 -; SI-NEXT: s_and_b32 s6, s16, 0xff -; SI-NEXT: s_lshl_b32 s8, s17, 8 -; SI-NEXT: s_or_b32 s6, s6, s8 -; SI-NEXT: s_and_b32 s6, s6, 0xffff -; SI-NEXT: v_mov_b32_e32 v0, s4 -; SI-NEXT: s_or_b32 s6, s6, s4 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s8, s25, 8 -; SI-NEXT: s_or_b32 s4, s4, s8 -; SI-NEXT: s_and_b32 s8, s26, 0xff -; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_lshl_b32 s9, s27, 24 +; SI-NEXT: s_or_b32 s12, s6, s5 +; SI-NEXT: s_and_b32 s5, s24, 0xff +; SI-NEXT: s_lshl_b32 s6, s25, 8 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s6, s26, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s27, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s8, s7, s6 +; SI-NEXT: s_or_b32 s10, s5, s8 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s6, s21, 8 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_and_b32 s6, s22, 0xff +; SI-NEXT: s_lshl_b32 s6, s6, 16 +; SI-NEXT: s_lshl_b32 s7, s23, 24 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s9, s7, s6 +; SI-NEXT: s_or_b32 s13, s5, s9 ; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s10, s9, s8 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 16 -; SI-NEXT: s_or_b32 s8, s4, s10 -; SI-NEXT: s_lshr_b32 s9, s5, 16 -; SI-NEXT: s_lshr_b32 s10, s10, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[12:13], 16 +; SI-NEXT: s_or_b32 s4, s4, s12 +; SI-NEXT: s_lshr_b32 s7, s9, 16 +; SI-NEXT: s_lshr_b32 s11, s8, 16 +; SI-NEXT: s_mov_b32 s5, s13 ; SI-NEXT: s_cbranch_execnz .LBB45_3 ; SI-NEXT: .LBB45_2: ; %cmp.true ; SI-NEXT: s_add_i32 s16, s16, 3 @@ -10318,52 +10334,51 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in ; SI-NEXT: s_lshl_b32 s6, s6, 16 ; SI-NEXT: s_and_b32 s4, s4, 0xffff ; SI-NEXT: s_or_b32 s5, s5, s6 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: 
s_add_i32 s6, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s20, 0xff -; SI-NEXT: s_lshl_b32 s5, s21, 8 -; SI-NEXT: s_add_i32 s22, s22, 3 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s20, 0xff +; SI-NEXT: s_lshl_b32 s6, s21, 8 +; SI-NEXT: s_add_i32 s22, s22, 3 +; SI-NEXT: s_or_b32 s5, s6, s5 ; SI-NEXT: s_and_b32 s7, s22, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s23, 24 +; SI-NEXT: s_addk_i32 s5, 0x300 +; SI-NEXT: s_lshl_b32 s6, s23, 24 ; SI-NEXT: s_lshl_b32 s7, s7, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s7 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s5, 0xffff +; SI-NEXT: s_or_b32 s6, s6, s7 ; SI-NEXT: s_add_i32 s24, s24, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x3000000 -; SI-NEXT: s_and_b32 s4, s24, 0xff -; SI-NEXT: s_lshl_b32 s5, s25, 8 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s24, 0xff +; SI-NEXT: s_lshl_b32 s7, s25, 8 ; SI-NEXT: s_add_i32 s26, s26, 3 -; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_or_b32 s6, s7, s6 ; SI-NEXT: s_and_b32 s8, s26, 0xff -; SI-NEXT: s_addk_i32 s4, 0x300 -; SI-NEXT: s_lshl_b32 s5, s27, 24 +; SI-NEXT: s_addk_i32 s6, 0x300 +; SI-NEXT: s_lshl_b32 s7, s27, 24 ; SI-NEXT: s_lshl_b32 s8, s8, 16 -; SI-NEXT: s_and_b32 s4, s4, 0xffff -; SI-NEXT: s_or_b32 s5, s5, s8 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s8, s4, 0x3000000 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 16 -; SI-NEXT: s_lshr_b32 s9, s7, 16 -; SI-NEXT: s_lshr_b32 s10, s8, 16 +; SI-NEXT: s_and_b32 s6, s6, 0xffff +; SI-NEXT: s_or_b32 s7, s7, s8 +; SI-NEXT: s_add_i32 s4, s4, 0x3000000 +; SI-NEXT: s_add_i32 s5, s5, 0x3000000 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_add_i32 s10, s6, 0x3000000 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 16 +; SI-NEXT: s_lshr_b32 s7, s5, 16 +; SI-NEXT: s_lshr_b32 s11, s10, 16 ; SI-NEXT: .LBB45_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_mov_b32_e32 v2, s7 -; SI-NEXT: v_mov_b32_e32 v3, s9 -; SI-NEXT: v_mov_b32_e32 v4, s8 
-; SI-NEXT: v_mov_b32_e32 v5, s10 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s6 +; SI-NEXT: v_mov_b32_e32 v2, s5 +; SI-NEXT: v_mov_b32_e32 v3, s7 +; SI-NEXT: v_mov_b32_e32 v4, s10 +; SI-NEXT: v_mov_b32_e32 v5, s11 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB45_4: +; SI-NEXT: ; implicit-def: $sgpr4 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 ; SI-NEXT: ; implicit-def: $sgpr7 -; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr11 ; SI-NEXT: s_branch .LBB45_2 ; ; VI-LABEL: bitcast_v12i8_to_v6i16_scalar: @@ -10954,74 +10969,74 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s6, s4, s5 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 -; SI-NEXT: s_or_b32 s7, s4, s5 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: s_or_b32 s8, s4, s5 -; SI-NEXT: v_alignbit_b32 v3, s7, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s7, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 8 -; SI-NEXT: s_lshr_b32 s9, s7, 8 -; SI-NEXT: s_lshr_b32 s12, s8, 8 -; SI-NEXT: s_and_b32 s10, s19, 0xffff -; SI-NEXT: s_and_b32 s13, s21, 0xffff +; SI-NEXT: s_or_b32 s4, s4, s5 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 +; SI-NEXT: s_or_b32 s5, s5, s6 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_and_b32 s9, s20, 0xffff +; SI-NEXT: s_lshl_b32 s11, s21, 16 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_or_b32 s14, s9, s11 +; SI-NEXT: s_lshr_b32 s7, s5, 8 +; SI-NEXT: s_lshr_b32 s15, s14, 8 +; SI-NEXT: s_and_b32 s9, s19, 0xffff +; SI-NEXT: s_and_b32 s22, s21, 0xffff ; SI-NEXT: s_bfe_u32 s11, s19, 0x80008 -; SI-NEXT: s_bfe_u32 s14, s21, 0x80008 +; 
SI-NEXT: s_bfe_u32 s23, s21, 0x80008 ; SI-NEXT: s_cbranch_execnz .LBB47_3 ; SI-NEXT: .LBB47_2: ; %cmp.true ; SI-NEXT: s_add_i32 s16, s16, 3 ; SI-NEXT: s_and_b32 s4, s16, 0xffff ; SI-NEXT: s_lshl_b32 s5, s17, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 ; SI-NEXT: s_add_i32 s18, s18, 3 -; SI-NEXT: s_add_i32 s6, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s18, 0xffff -; SI-NEXT: s_lshl_b32 s5, s19, 16 ; SI-NEXT: s_or_b32 s4, s5, s4 +; SI-NEXT: s_and_b32 s5, s18, 0xffff +; SI-NEXT: s_lshl_b32 s6, s19, 16 ; SI-NEXT: s_add_i32 s20, s20, 3 -; SI-NEXT: s_add_i32 s7, s4, 0x30000 -; SI-NEXT: s_and_b32 s4, s20, 0xffff -; SI-NEXT: s_lshl_b32 s5, s21, 16 -; SI-NEXT: s_or_b32 s4, s5, s4 -; SI-NEXT: s_add_i32 s8, s4, 0x30000 -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_alignbit_b32 v3, s7, v0, 24 -; SI-NEXT: v_alignbit_b32 v2, s7, v0, 16 -; SI-NEXT: v_alignbit_b32 v1, s7, v0, 8 -; SI-NEXT: s_lshr_b32 s11, s7, 24 -; SI-NEXT: s_lshr_b32 s10, s7, 16 -; SI-NEXT: s_lshr_b32 s9, s7, 8 -; SI-NEXT: s_lshr_b32 s14, s8, 24 -; SI-NEXT: s_lshr_b32 s13, s8, 16 -; SI-NEXT: s_lshr_b32 s12, s8, 8 +; SI-NEXT: s_or_b32 s5, s6, s5 +; SI-NEXT: s_and_b32 s6, s20, 0xffff +; SI-NEXT: s_lshl_b32 s7, s21, 16 +; SI-NEXT: s_add_i32 s4, s4, 0x30000 +; SI-NEXT: s_add_i32 s5, s5, 0x30000 +; SI-NEXT: s_or_b32 s6, s7, s6 +; SI-NEXT: s_add_i32 s14, s6, 0x30000 +; SI-NEXT: s_lshr_b64 s[6:7], s[4:5], 24 +; SI-NEXT: s_lshr_b64 s[8:9], s[4:5], 16 +; SI-NEXT: s_lshr_b64 s[10:11], s[4:5], 8 +; SI-NEXT: s_lshr_b32 s11, s5, 24 +; SI-NEXT: s_lshr_b32 s9, s5, 16 +; SI-NEXT: s_lshr_b32 s7, s5, 8 +; SI-NEXT: s_lshr_b32 s23, s14, 24 +; SI-NEXT: s_lshr_b32 s22, s14, 16 +; SI-NEXT: s_lshr_b32 s15, s14, 8 ; SI-NEXT: .LBB47_3: ; %end -; SI-NEXT: v_mov_b32_e32 v0, s6 -; SI-NEXT: v_mov_b32_e32 v4, s7 -; SI-NEXT: v_mov_b32_e32 v5, s9 -; SI-NEXT: v_mov_b32_e32 v6, s10 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_mov_b32_e32 v1, s10 +; SI-NEXT: v_mov_b32_e32 v2, s8 +; SI-NEXT: v_mov_b32_e32 v3, s6 +; SI-NEXT: v_mov_b32_e32 v4, s5 +; SI-NEXT: 
v_mov_b32_e32 v5, s7 +; SI-NEXT: v_mov_b32_e32 v6, s9 ; SI-NEXT: v_mov_b32_e32 v7, s11 -; SI-NEXT: v_mov_b32_e32 v8, s8 -; SI-NEXT: v_mov_b32_e32 v9, s12 -; SI-NEXT: v_mov_b32_e32 v10, s13 -; SI-NEXT: v_mov_b32_e32 v11, s14 +; SI-NEXT: v_mov_b32_e32 v8, s14 +; SI-NEXT: v_mov_b32_e32 v9, s15 +; SI-NEXT: v_mov_b32_e32 v10, s22 +; SI-NEXT: v_mov_b32_e32 v11, s23 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB47_4: +; SI-NEXT: ; implicit-def: $sgpr4 +; SI-NEXT: ; implicit-def: $sgpr10 +; SI-NEXT: ; implicit-def: $sgpr8 ; SI-NEXT: ; implicit-def: $sgpr6 -; SI-NEXT: ; implicit-def: $vgpr1 -; SI-NEXT: ; implicit-def: $vgpr2 -; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $sgpr7 ; SI-NEXT: ; implicit-def: $sgpr9 -; SI-NEXT: ; implicit-def: $sgpr10 ; SI-NEXT: ; implicit-def: $sgpr11 -; SI-NEXT: ; implicit-def: $sgpr8 -; SI-NEXT: ; implicit-def: $sgpr12 -; SI-NEXT: ; implicit-def: $sgpr13 ; SI-NEXT: ; implicit-def: $sgpr14 +; SI-NEXT: ; implicit-def: $sgpr15 +; SI-NEXT: ; implicit-def: $sgpr22 +; SI-NEXT: ; implicit-def: $sgpr23 ; SI-NEXT: s_branch .LBB47_2 ; ; VI-LABEL: bitcast_v6i16_to_v12i8_scalar: @@ -12541,44 +12556,45 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3 ; SI-NEXT: v_mul_f32_e64 v11, 1.0, s16 ; SI-NEXT: v_mul_f32_e64 v10, 1.0, s17 ; SI-NEXT: v_mul_f32_e64 v7, 1.0, s18 -; SI-NEXT: v_mul_f32_e64 v6, 1.0, s19 +; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19 ; SI-NEXT: v_mul_f32_e64 v9, 1.0, s20 ; SI-NEXT: v_mul_f32_e64 v8, 1.0, s21 ; SI-NEXT: s_cbranch_scc0 .LBB53_4 ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v11 -; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v10 +; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v10 ; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v7 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v6 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v9 ; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v8 ; SI-NEXT: s_cbranch_execnz .LBB53_3 ; SI-NEXT: .LBB53_2: ; %cmp.true -; SI-NEXT: v_and_b32_e32 v1, 
0xffff0000, v10 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v10 ; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v11 -; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 +; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v2 ; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v8 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4 ; SI-NEXT: v_alignbit_b32 v0, v2, v0, 16 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v2 +; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v8 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v3 -; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v6 -; SI-NEXT: v_alignbit_b32 v4, v5, v2, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v2 ; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v7 -; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3 +; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1 ; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2 -; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16 -; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; SI-NEXT: v_lshr_b64 v[6:7], v[1:2], 16 +; SI-NEXT: v_alignbit_b32 v4, v5, v9, 16 ; SI-NEXT: .LBB53_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v6 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB53_4: ; SI-NEXT: ; implicit-def: $vgpr0 -; SI-NEXT: ; implicit-def: $vgpr1 +; SI-NEXT: ; implicit-def: $vgpr6 ; SI-NEXT: ; implicit-def: $vgpr2 ; SI-NEXT: ; implicit-def: $vgpr3 ; SI-NEXT: ; implicit-def: $vgpr4 @@ -13210,7 +13226,7 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; SI-NEXT: v_cvt_f16_f32_e32 v0, s16 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s17 +; SI-NEXT: v_cvt_f16_f32_e32 v6, s17 ; SI-NEXT: v_cvt_f16_f32_e32 v2, 
s18 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s19 ; SI-NEXT: v_cvt_f16_f32_e32 v4, s20 @@ -13220,32 +13236,33 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i ; SI-NEXT: ; %bb.1: ; %cmp.false ; SI-NEXT: s_cbranch_execnz .LBB57_3 ; SI-NEXT: .LBB57_2: ; %cmp.true -; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v6 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 -; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3 ; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1 -; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 ; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2 -; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v5 -; SI-NEXT: v_or_b32_e32 v4, v4, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 ; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v3 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; SI-NEXT: v_or_b32_e32 v2, v2, v6 +; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v5 +; SI-NEXT: v_lshr_b64 v[6:7], v[1:2], 16 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 -; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16 +; SI-NEXT: v_or_b32_e32 v4, v4, v8 ; SI-NEXT: .LBB57_3: ; %end +; SI-NEXT: v_mov_b32_e32 v1, v6 ; SI-NEXT: s_setpc_b64 s[30:31] ; SI-NEXT: .LBB57_4: ; SI-NEXT: s_branch .LBB57_2 diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll index 
b69afb8e301bb..e27164c2d6d69 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll @@ -4610,50 +4610,48 @@ define amdgpu_kernel void @udiv_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) +; GFX6-NEXT: s_lshr_b64 s[6:7], s[10:11], 30 +; GFX6-NEXT: s_mov_b32 s0, s8 +; GFX6-NEXT: s_and_b32 s8, s6, 0x7fff +; GFX6-NEXT: s_and_b32 s6, s4, 0x7fff +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6 ; GFX6-NEXT: s_and_b32 s6, s10, 0x7fff -; GFX6-NEXT: s_and_b32 s7, s4, 0x7fff -; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s7 -; GFX6-NEXT: v_mov_b32_e32 v2, s4 +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s6 +; GFX6-NEXT: s_lshr_b64 s[6:7], s[4:5], 30 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v0 ; GFX6-NEXT: s_bfe_u32 s4, s4, 0xf000f -; GFX6-NEXT: v_cvt_f32_u32_e32 v3, s6 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GFX6-NEXT: v_cvt_f32_u32_e32 v5, s4 -; GFX6-NEXT: s_bfe_u32 s7, s10, 0xf000f -; GFX6-NEXT: v_alignbit_b32 v2, s5, v2, 30 -; GFX6-NEXT: v_mul_f32_e32 v4, v3, v4 -; GFX6-NEXT: v_cvt_f32_u32_e32 v6, s7 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v7, v5 -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 -; GFX6-NEXT: v_trunc_f32_e32 v4, v4 -; GFX6-NEXT: v_mad_f32 v3, -v4, v1, v3 -; GFX6-NEXT: v_cvt_u32_f32_e32 v4, v4 -; GFX6-NEXT: v_cvt_f32_u32_e32 v2, v2 -; GFX6-NEXT: v_mov_b32_e32 v0, s10 -; GFX6-NEXT: v_alignbit_b32 v0, s11, v0, 30 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v1 -; GFX6-NEXT: v_mul_f32_e32 v1, v6, v7 -; GFX6-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX6-NEXT: v_trunc_f32_e32 v1, v1 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc -; GFX6-NEXT: v_mad_f32 v4, -v1, v5, v6 -; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 -; GFX6-NEXT: v_cvt_f32_u32_e32 v0, v0 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v2 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, v5 -; GFX6-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc -; GFX6-NEXT: v_mul_f32_e32 v1, v0, v6 -; GFX6-NEXT: 
v_trunc_f32_e32 v1, v1 -; GFX6-NEXT: v_cvt_u32_f32_e32 v5, v1 -; GFX6-NEXT: v_mad_f32 v0, -v1, v2, v0 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v2 -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v3 -; GFX6-NEXT: v_addc_u32_e32 v0, vcc, 0, v5, vcc -; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v4 +; GFX6-NEXT: v_cvt_f32_u32_e32 v3, s4 +; GFX6-NEXT: s_mov_b32 s1, s9 +; GFX6-NEXT: s_bfe_u32 s9, s10, 0xf000f +; GFX6-NEXT: v_mul_f32_e32 v2, v1, v2 +; GFX6-NEXT: v_trunc_f32_e32 v2, v2 +; GFX6-NEXT: v_cvt_f32_u32_e32 v4, s9 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v5, v3 +; GFX6-NEXT: s_and_b32 s5, s6, 0x7fff +; GFX6-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0 +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s5 +; GFX6-NEXT: v_cvt_u32_f32_e32 v2, v2 +; GFX6-NEXT: v_mul_f32_e32 v0, v4, v5 +; GFX6-NEXT: v_trunc_f32_e32 v0, v0 +; GFX6-NEXT: v_mad_f32 v4, -v0, v3, v4 +; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX6-NEXT: v_cvt_f32_u32_e32 v5, s8 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v1 +; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, v3 +; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v0, vcc +; GFX6-NEXT: v_mul_f32_e32 v0, v5, v6 +; GFX6-NEXT: v_trunc_f32_e32 v0, v0 +; GFX6-NEXT: v_cvt_u32_f32_e32 v4, v0 +; GFX6-NEXT: v_mad_f32 v0, -v0, v1, v5 +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1 +; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v3 +; GFX6-NEXT: v_addc_u32_e32 v0, vcc, 0, v4, vcc ; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30 +; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 ; GFX6-NEXT: v_lshlrev_b32_e32 v3, 15, v3 ; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX6-NEXT: s_mov_b32 s0, s8 -; GFX6-NEXT: s_mov_b32 s1, s9 ; GFX6-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_waitcnt expcnt(0) @@ -4667,46 +4665,44 @@ define amdgpu_kernel void @udiv_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; 
GFX9-NEXT: v_mov_b32_e32 v0, s2 -; GFX9-NEXT: s_and_b32 s5, s6, 0x7fff +; GFX9-NEXT: s_lshr_b64 s[4:5], s[2:3], 30 +; GFX9-NEXT: s_and_b32 s3, s6, 0x7fff +; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s3 +; GFX9-NEXT: s_and_b32 s5, s2, 0x7fff +; GFX9-NEXT: s_bfe_u32 s8, s2, 0xf000f +; GFX9-NEXT: s_lshr_b64 s[2:3], s[6:7], 30 ; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s5 -; GFX9-NEXT: s_and_b32 s4, s2, 0x7fff -; GFX9-NEXT: v_alignbit_b32 v0, s3, v0, 30 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v0 ; GFX9-NEXT: s_bfe_u32 s3, s6, 0xf000f -; GFX9-NEXT: v_cvt_f32_u32_e32 v4, s4 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v5, v1 -; GFX9-NEXT: v_cvt_f32_u32_e32 v6, s3 -; GFX9-NEXT: s_bfe_u32 s2, s2, 0xf000f -; GFX9-NEXT: v_mov_b32_e32 v3, s6 -; GFX9-NEXT: v_alignbit_b32 v3, s7, v3, 30 -; GFX9-NEXT: v_mul_f32_e32 v5, v4, v5 -; GFX9-NEXT: v_cvt_f32_u32_e32 v7, s2 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v8, v6 -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v3 -; GFX9-NEXT: v_trunc_f32_e32 v5, v5 -; GFX9-NEXT: v_mad_f32 v4, -v5, v1, v4 -; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v5 -; GFX9-NEXT: v_cvt_f32_u32_e32 v3, v3 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, v1 -; GFX9-NEXT: v_mul_f32_e32 v1, v7, v8 -; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX9-NEXT: v_trunc_f32_e32 v1, v1 -; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v5, vcc -; GFX9-NEXT: v_mad_f32 v5, -v1, v6, v7 -; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 -; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v3 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v5|, v6 -; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v1, vcc -; GFX9-NEXT: v_mul_f32_e32 v1, v0, v7 -; GFX9-NEXT: v_trunc_f32_e32 v1, v1 -; GFX9-NEXT: v_cvt_u32_f32_e32 v6, v1 -; GFX9-NEXT: v_mad_f32 v0, -v1, v3, v0 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v3 -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v4 -; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, 0, v6, vcc -; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v5 +; GFX9-NEXT: v_cvt_f32_u32_e32 v4, s3 +; GFX9-NEXT: v_cvt_f32_u32_e32 v5, s8 +; GFX9-NEXT: v_mul_f32_e32 v3, v1, v3 
+; GFX9-NEXT: v_trunc_f32_e32 v3, v3 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v4 +; GFX9-NEXT: s_and_b32 s2, s2, 0x7fff +; GFX9-NEXT: v_mad_f32 v1, -v3, v0, v1 +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0 +; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s2 +; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3 +; GFX9-NEXT: v_mul_f32_e32 v0, v5, v6 +; GFX9-NEXT: s_and_b32 s4, s4, 0x7fff +; GFX9-NEXT: v_trunc_f32_e32 v0, v0 +; GFX9-NEXT: v_mad_f32 v5, -v0, v4, v5 +; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX9-NEXT: v_cvt_f32_u32_e32 v6, s4 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v1 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v5|, v4 +; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v0, vcc +; GFX9-NEXT: v_mul_f32_e32 v0, v6, v7 +; GFX9-NEXT: v_trunc_f32_e32 v0, v0 +; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v0 +; GFX9-NEXT: v_mad_f32 v0, -v0, v1, v6 +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1 +; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v4 +; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, 0, v5, vcc ; GFX9-NEXT: v_lshlrev_b64 v[0:1], 30, v[0:1] +; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v3 ; GFX9-NEXT: v_lshlrev_b32_e32 v4, 15, v4 ; GFX9-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX9-NEXT: v_or_b32_e32 v0, v3, v0 @@ -4797,58 +4793,56 @@ define amdgpu_kernel void @urem_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) +; GFX6-NEXT: s_lshr_b64 s[6:7], s[10:11], 30 ; GFX6-NEXT: s_mov_b32 s0, s8 -; GFX6-NEXT: s_and_b32 s8, s4, 0x7fff -; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s8 -; GFX6-NEXT: s_and_b32 s7, s10, 0x7fff -; GFX6-NEXT: v_cvt_f32_u32_e32 v3, s7 -; GFX6-NEXT: v_mov_b32_e32 v2, s4 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GFX6-NEXT: v_alignbit_b32 v2, s5, v2, 30 -; GFX6-NEXT: s_bfe_u32 s5, s4, 0xf000f -; GFX6-NEXT: v_cvt_f32_u32_e32 v5, s5 -; GFX6-NEXT: v_mul_f32_e32 v4, v3, v4 -; GFX6-NEXT: v_trunc_f32_e32 v4, v4 -; GFX6-NEXT: v_mad_f32 v3, -v4, v1, v3 -; GFX6-NEXT: v_cvt_u32_f32_e32 v4, v4 -; 
GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v1 -; GFX6-NEXT: s_bfe_u32 s8, s10, 0xf000f -; GFX6-NEXT: v_cvt_f32_u32_e32 v3, s8 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v4, vcc -; GFX6-NEXT: v_mul_lo_u32 v1, v1, s4 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v5 -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 -; GFX6-NEXT: v_mov_b32_e32 v0, s10 -; GFX6-NEXT: v_sub_i32_e32 v6, vcc, s10, v1 -; GFX6-NEXT: v_mul_f32_e32 v1, v3, v4 -; GFX6-NEXT: v_cvt_f32_u32_e32 v4, v2 -; GFX6-NEXT: v_alignbit_b32 v0, s11, v0, 30 -; GFX6-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX6-NEXT: v_cvt_f32_u32_e32 v7, v0 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v8, v4 -; GFX6-NEXT: v_trunc_f32_e32 v1, v1 -; GFX6-NEXT: v_mad_f32 v3, -v1, v5, v3 -; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v5 -; GFX6-NEXT: v_mul_f32_e32 v3, v7, v8 -; GFX6-NEXT: v_trunc_f32_e32 v3, v3 -; GFX6-NEXT: v_cvt_u32_f32_e32 v5, v3 -; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc -; GFX6-NEXT: v_mad_f32 v3, -v3, v4, v7 +; GFX6-NEXT: s_and_b32 s8, s6, 0x7fff +; GFX6-NEXT: s_and_b32 s6, s4, 0x7fff +; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6 +; GFX6-NEXT: s_and_b32 s6, s10, 0x7fff +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s6 +; GFX6-NEXT: s_lshr_b64 s[6:7], s[4:5], 30 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GFX6-NEXT: s_and_b32 s5, s6, 0x7fff +; GFX6-NEXT: s_bfe_u32 s6, s4, 0xf000f +; GFX6-NEXT: v_cvt_f32_u32_e32 v3, s6 +; GFX6-NEXT: v_mul_f32_e32 v2, v1, v2 +; GFX6-NEXT: v_trunc_f32_e32 v2, v2 +; GFX6-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GFX6-NEXT: v_cvt_u32_f32_e32 v2, v2 +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0 +; GFX6-NEXT: s_bfe_u32 s11, s10, 0xf000f +; GFX6-NEXT: v_cvt_f32_u32_e32 v1, s11 +; GFX6-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc +; GFX6-NEXT: v_mul_lo_u32 v0, v0, s4 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v3 +; GFX6-NEXT: v_cvt_f32_u32_e32 v5, s8 ; GFX6-NEXT: s_lshr_b32 s4, s4, 15 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v4 -; GFX6-NEXT: v_mul_lo_u32 v1, v1, s4 -; GFX6-NEXT: v_addc_u32_e32 v3, vcc, 0, v5, vcc 
-; GFX6-NEXT: v_mul_lo_u32 v2, v3, v2 -; GFX6-NEXT: s_lshr_b32 s6, s10, 15 -; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s6, v1 -; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v3 -; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30 -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v6 -; GFX6-NEXT: v_lshlrev_b32_e32 v3, 15, v3 -; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 +; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s10, v0 +; GFX6-NEXT: v_mul_f32_e32 v0, v1, v2 +; GFX6-NEXT: v_cvt_f32_u32_e32 v2, s5 +; GFX6-NEXT: v_trunc_f32_e32 v0, v0 +; GFX6-NEXT: v_mad_f32 v1, -v0, v3, v1 +; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v2 +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v3 ; GFX6-NEXT: s_mov_b32 s1, s9 +; GFX6-NEXT: v_addc_u32_e32 v0, vcc, 0, v0, vcc +; GFX6-NEXT: v_mul_f32_e32 v1, v5, v6 +; GFX6-NEXT: v_trunc_f32_e32 v1, v1 +; GFX6-NEXT: v_cvt_u32_f32_e32 v3, v1 +; GFX6-NEXT: v_mad_f32 v1, -v1, v2, v5 +; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v2 +; GFX6-NEXT: v_mul_lo_u32 v0, v0, s4 +; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc +; GFX6-NEXT: v_mul_lo_u32 v1, v1, s5 +; GFX6-NEXT: s_lshr_b32 s9, s10, 15 +; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s9, v0 +; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s8, v1 +; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 +; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30 +; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v4 +; GFX6-NEXT: v_lshlrev_b32_e32 v2, 15, v2 +; GFX6-NEXT: v_or_b32_e32 v2, v2, v3 ; GFX6-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_waitcnt expcnt(0) @@ -4862,54 +4856,52 @@ define amdgpu_kernel void @urem_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s2 -; GFX9-NEXT: v_alignbit_b32 v0, s3, v0, 30 -; GFX9-NEXT: s_and_b32 s3, s6, 0x7fff +; GFX9-NEXT: s_lshr_b64 s[4:5], s[2:3], 30 +; GFX9-NEXT: s_and_b32 s5, s6, 0x7fff +; GFX9-NEXT: 
v_cvt_f32_u32_e32 v0, s5 +; GFX9-NEXT: s_and_b32 s3, s2, 0x7fff ; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s3 -; GFX9-NEXT: s_and_b32 s4, s2, 0x7fff -; GFX9-NEXT: v_cvt_f32_u32_e32 v4, s4 -; GFX9-NEXT: s_bfe_u32 s4, s6, 0xf000f -; GFX9-NEXT: v_rcp_iflag_f32_e32 v5, v1 -; GFX9-NEXT: v_cvt_f32_u32_e32 v6, s4 -; GFX9-NEXT: v_mov_b32_e32 v3, s6 -; GFX9-NEXT: v_alignbit_b32 v3, s7, v3, 30 -; GFX9-NEXT: v_mul_f32_e32 v5, v4, v5 -; GFX9-NEXT: v_trunc_f32_e32 v5, v5 -; GFX9-NEXT: v_mad_f32 v4, -v5, v1, v4 -; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v5 -; GFX9-NEXT: s_bfe_u32 s5, s2, 0xf000f -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v3 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, v1 -; GFX9-NEXT: v_cvt_f32_u32_e32 v7, s5 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v8, v6 -; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v5, vcc -; GFX9-NEXT: v_cvt_f32_u32_e32 v5, v3 -; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX9-NEXT: v_mul_f32_e32 v4, v7, v8 -; GFX9-NEXT: v_cvt_f32_u32_e32 v8, v0 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v9, v5 +; GFX9-NEXT: s_and_b32 s8, s4, 0x7fff +; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v0 +; GFX9-NEXT: s_lshr_b64 s[4:5], s[6:7], 30 +; GFX9-NEXT: s_bfe_u32 s5, s6, 0xf000f +; GFX9-NEXT: v_cvt_f32_u32_e32 v4, s5 +; GFX9-NEXT: v_mul_f32_e32 v3, v1, v3 +; GFX9-NEXT: v_trunc_f32_e32 v3, v3 +; GFX9-NEXT: v_mad_f32 v1, -v3, v0, v1 +; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3 +; GFX9-NEXT: s_bfe_u32 s9, s2, 0xf000f +; GFX9-NEXT: s_and_b32 s3, s4, 0x7fff +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0 +; GFX9-NEXT: v_cvt_f32_u32_e32 v5, s9 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v4 +; GFX9-NEXT: v_addc_co_u32_e32 v0, vcc, 0, v3, vcc +; GFX9-NEXT: v_cvt_f32_u32_e32 v3, s3 +; GFX9-NEXT: v_mul_f32_e32 v1, v5, v6 +; GFX9-NEXT: v_cvt_f32_u32_e32 v6, s8 +; GFX9-NEXT: v_trunc_f32_e32 v1, v1 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v3 +; GFX9-NEXT: v_mad_f32 v5, -v1, v4, v5 +; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v5|, v4 +; GFX9-NEXT: v_mul_f32_e32 v4, v6, v7 ; GFX9-NEXT: v_trunc_f32_e32 v4, 
v4 -; GFX9-NEXT: v_mad_f32 v7, -v4, v6, v7 -; GFX9-NEXT: v_cvt_u32_f32_e32 v4, v4 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v7|, v6 -; GFX9-NEXT: v_mul_f32_e32 v6, v8, v9 -; GFX9-NEXT: v_trunc_f32_e32 v6, v6 -; GFX9-NEXT: v_cvt_u32_f32_e32 v7, v6 -; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, 0, v4, vcc -; GFX9-NEXT: v_mad_f32 v6, -v6, v5, v8 -; GFX9-NEXT: s_lshr_b32 s3, s6, 15 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v6|, v5 -; GFX9-NEXT: v_mul_lo_u32 v4, v4, s3 -; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v7, vcc -; GFX9-NEXT: v_mul_lo_u32 v1, v1, s6 -; GFX9-NEXT: v_mul_lo_u32 v3, v5, v3 +; GFX9-NEXT: v_cvt_u32_f32_e32 v5, v4 +; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc +; GFX9-NEXT: v_mad_f32 v4, -v4, v3, v6 +; GFX9-NEXT: s_lshr_b32 s4, s6, 15 +; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v4|, v3 +; GFX9-NEXT: v_mul_lo_u32 v0, v0, s6 +; GFX9-NEXT: v_mul_lo_u32 v1, v1, s4 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v5, vcc +; GFX9-NEXT: v_mul_lo_u32 v3, v3, s3 ; GFX9-NEXT: s_lshr_b32 s3, s2, 15 -; GFX9-NEXT: v_sub_u32_e32 v4, s3, v4 -; GFX9-NEXT: v_sub_u32_e32 v5, s2, v1 -; GFX9-NEXT: v_sub_u32_e32 v0, v0, v3 -; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v4 +; GFX9-NEXT: v_sub_u32_e32 v4, s2, v0 +; GFX9-NEXT: v_sub_u32_e32 v5, s3, v1 +; GFX9-NEXT: v_sub_u32_e32 v0, s8, v3 +; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v4 +; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v5 ; GFX9-NEXT: v_lshlrev_b64 v[0:1], 30, v[0:1] -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v5 ; GFX9-NEXT: v_lshlrev_b32_e32 v4, 15, v4 ; GFX9-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX9-NEXT: v_or_b32_e32 v0, v3, v0 @@ -5006,64 +4998,63 @@ define amdgpu_kernel void @sdiv_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: v_mov_b32_e32 v0, s10 -; GFX6-NEXT: s_bfe_i32 s6, s4, 0xf0000 -; GFX6-NEXT: v_cvt_f32_i32_e32 v2, s6 -; GFX6-NEXT: v_mov_b32_e32 v1, s4 -; GFX6-NEXT: v_alignbit_b32 v1, s5, v1, 30 +; GFX6-NEXT: s_lshr_b64 s[6:7], 
s[10:11], 30 +; GFX6-NEXT: s_bfe_i32 s7, s4, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v0, s7 +; GFX6-NEXT: s_mov_b32 s0, s8 +; GFX6-NEXT: s_mov_b32 s1, s9 +; GFX6-NEXT: s_lshr_b64 s[8:9], s[4:5], 30 ; GFX6-NEXT: s_bfe_i32 s5, s10, 0xf0000 -; GFX6-NEXT: v_cvt_f32_i32_e32 v3, s5 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v2 -; GFX6-NEXT: s_xor_b32 s5, s5, s6 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s5 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GFX6-NEXT: s_xor_b32 s5, s5, s7 ; GFX6-NEXT: s_ashr_i32 s5, s5, 30 ; GFX6-NEXT: s_or_b32 s5, s5, 1 -; GFX6-NEXT: v_mul_f32_e32 v4, v3, v4 -; GFX6-NEXT: v_trunc_f32_e32 v4, v4 -; GFX6-NEXT: v_mad_f32 v3, -v4, v2, v3 -; GFX6-NEXT: v_cmp_ge_f32_e64 s[6:7], |v3|, |v2| -; GFX6-NEXT: s_and_b64 s[6:7], s[6:7], exec -; GFX6-NEXT: v_cvt_i32_f32_e32 v4, v4 +; GFX6-NEXT: v_mul_f32_e32 v2, v1, v2 +; GFX6-NEXT: v_trunc_f32_e32 v2, v2 +; GFX6-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[12:13], |v1|, |v0| +; GFX6-NEXT: s_and_b64 s[12:13], s[12:13], exec +; GFX6-NEXT: v_cvt_i32_f32_e32 v2, v2 ; GFX6-NEXT: s_cselect_b32 s5, s5, 0 ; GFX6-NEXT: s_bfe_i32 s4, s4, 0xf000f -; GFX6-NEXT: v_cvt_f32_i32_e32 v2, s4 -; GFX6-NEXT: v_add_i32_e32 v3, vcc, s5, v4 +; GFX6-NEXT: v_cvt_f32_i32_e32 v0, s4 +; GFX6-NEXT: v_add_i32_e32 v2, vcc, s5, v2 ; GFX6-NEXT: s_bfe_i32 s5, s10, 0xf000f -; GFX6-NEXT: v_cvt_f32_i32_e32 v4, s5 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v5, v2 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s5 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v3, v0 ; GFX6-NEXT: s_xor_b32 s4, s5, s4 ; GFX6-NEXT: s_ashr_i32 s4, s4, 30 -; GFX6-NEXT: v_bfe_i32 v1, v1, 0, 15 -; GFX6-NEXT: v_mul_f32_e32 v5, v4, v5 -; GFX6-NEXT: v_trunc_f32_e32 v5, v5 -; GFX6-NEXT: v_mad_f32 v4, -v5, v2, v4 +; GFX6-NEXT: s_or_b32 s7, s4, 1 +; GFX6-NEXT: v_mul_f32_e32 v3, v1, v3 +; GFX6-NEXT: v_trunc_f32_e32 v3, v3 +; GFX6-NEXT: v_mad_f32 v1, -v3, v0, v1 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v1|, |v0| +; GFX6-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX6-NEXT: v_cvt_i32_f32_e32 v3, v3 +; 
GFX6-NEXT: s_cselect_b32 s4, s7, 0 +; GFX6-NEXT: s_bfe_i32 s5, s8, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v0, s5 +; GFX6-NEXT: v_add_i32_e32 v3, vcc, s4, v3 +; GFX6-NEXT: s_bfe_i32 s4, s6, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s4 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v0 +; GFX6-NEXT: s_xor_b32 s4, s4, s5 +; GFX6-NEXT: s_ashr_i32 s4, s4, 30 ; GFX6-NEXT: s_or_b32 s6, s4, 1 -; GFX6-NEXT: v_cvt_i32_f32_e32 v5, v5 -; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v4|, |v2| -; GFX6-NEXT: v_cvt_f32_i32_e32 v2, v1 -; GFX6-NEXT: v_alignbit_b32 v0, s11, v0, 30 +; GFX6-NEXT: v_mul_f32_e32 v4, v1, v4 +; GFX6-NEXT: v_trunc_f32_e32 v4, v4 +; GFX6-NEXT: v_mad_f32 v1, -v4, v0, v1 +; GFX6-NEXT: v_cvt_i32_f32_e32 v4, v4 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v1|, |v0| ; GFX6-NEXT: s_and_b64 s[4:5], s[4:5], exec ; GFX6-NEXT: s_cselect_b32 s4, s6, 0 -; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 15 -; GFX6-NEXT: v_add_i32_e32 v4, vcc, s4, v5 -; GFX6-NEXT: v_cvt_f32_i32_e32 v5, v0 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v2 -; GFX6-NEXT: v_xor_b32_e32 v0, v0, v1 -; GFX6-NEXT: v_ashrrev_i32_e32 v0, 30, v0 -; GFX6-NEXT: v_or_b32_e32 v0, 1, v0 -; GFX6-NEXT: v_mul_f32_e32 v1, v5, v6 -; GFX6-NEXT: v_trunc_f32_e32 v1, v1 -; GFX6-NEXT: v_mad_f32 v5, -v1, v2, v5 -; GFX6-NEXT: v_cvt_i32_f32_e32 v1, v1 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v5|, |v2| -; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v3 -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 -; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v4 +; GFX6-NEXT: v_add_i32_e32 v0, vcc, s4, v4 +; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v3 ; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30 +; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 ; GFX6-NEXT: v_lshlrev_b32_e32 v3, 15, v3 ; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX6-NEXT: s_mov_b32 s0, s8 -; GFX6-NEXT: s_mov_b32 s1, s9 ; GFX6-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_waitcnt expcnt(0) @@ -5077,60 +5068,59 @@ define amdgpu_kernel void 
@sdiv_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s2 -; GFX9-NEXT: s_bfe_i32 s4, s6, 0xf0000 -; GFX9-NEXT: v_cvt_f32_i32_e32 v3, s4 -; GFX9-NEXT: v_alignbit_b32 v0, s3, v0, 30 -; GFX9-NEXT: s_bfe_i32 s3, s2, 0xf0000 -; GFX9-NEXT: v_cvt_f32_i32_e32 v4, s3 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v5, v3 -; GFX9-NEXT: s_xor_b32 s3, s3, s4 +; GFX9-NEXT: s_lshr_b64 s[4:5], s[2:3], 30 +; GFX9-NEXT: s_bfe_i32 s3, s6, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v0, s3 +; GFX9-NEXT: s_bfe_i32 s5, s2, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v1, s5 +; GFX9-NEXT: s_xor_b32 s3, s5, s3 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v0 ; GFX9-NEXT: s_ashr_i32 s3, s3, 30 +; GFX9-NEXT: s_lshr_b64 s[8:9], s[6:7], 30 ; GFX9-NEXT: s_or_b32 s3, s3, 1 -; GFX9-NEXT: v_mul_f32_e32 v5, v4, v5 -; GFX9-NEXT: v_trunc_f32_e32 v5, v5 -; GFX9-NEXT: v_mad_f32 v4, -v5, v3, v4 -; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], |v4|, |v3| -; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX9-NEXT: v_mul_f32_e32 v3, v1, v3 +; GFX9-NEXT: v_trunc_f32_e32 v3, v3 +; GFX9-NEXT: v_mad_f32 v1, -v3, v0, v1 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[10:11], |v1|, |v0| +; GFX9-NEXT: s_and_b64 s[10:11], s[10:11], exec ; GFX9-NEXT: s_cselect_b32 s3, s3, 0 -; GFX9-NEXT: s_bfe_i32 s4, s6, 0xf000f -; GFX9-NEXT: v_cvt_i32_f32_e32 v5, v5 -; GFX9-NEXT: v_cvt_f32_i32_e32 v3, s4 +; GFX9-NEXT: s_bfe_i32 s5, s6, 0xf000f +; GFX9-NEXT: v_cvt_f32_i32_e32 v0, s5 ; GFX9-NEXT: s_bfe_i32 s2, s2, 0xf000f -; GFX9-NEXT: v_mov_b32_e32 v1, s6 -; GFX9-NEXT: v_add_u32_e32 v4, s3, v5 -; GFX9-NEXT: v_cvt_f32_i32_e32 v5, s2 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v3 -; GFX9-NEXT: v_alignbit_b32 v1, s7, v1, 30 -; GFX9-NEXT: s_xor_b32 s2, s2, s4 +; GFX9-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GFX9-NEXT: v_cvt_i32_f32_e32 v3, v3 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v0 +; GFX9-NEXT: s_xor_b32 s2, s2, s5 +; GFX9-NEXT: s_ashr_i32 s2, s2, 30 +; 
GFX9-NEXT: v_add_u32_e32 v3, s3, v3 +; GFX9-NEXT: v_mul_f32_e32 v4, v1, v4 +; GFX9-NEXT: v_trunc_f32_e32 v4, v4 +; GFX9-NEXT: v_mad_f32 v1, -v4, v0, v1 +; GFX9-NEXT: s_or_b32 s5, s2, 1 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[2:3], |v1|, |v0| +; GFX9-NEXT: s_and_b64 s[2:3], s[2:3], exec +; GFX9-NEXT: v_cvt_i32_f32_e32 v4, v4 +; GFX9-NEXT: s_cselect_b32 s2, s5, 0 +; GFX9-NEXT: s_bfe_i32 s3, s8, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v0, s3 +; GFX9-NEXT: v_add_u32_e32 v4, s2, v4 +; GFX9-NEXT: s_bfe_i32 s2, s4, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v5, v0 +; GFX9-NEXT: s_xor_b32 s2, s2, s3 ; GFX9-NEXT: s_ashr_i32 s2, s2, 30 -; GFX9-NEXT: v_mul_f32_e32 v6, v5, v6 -; GFX9-NEXT: v_trunc_f32_e32 v6, v6 -; GFX9-NEXT: v_mad_f32 v5, -v6, v3, v5 -; GFX9-NEXT: v_bfe_i32 v1, v1, 0, 15 ; GFX9-NEXT: s_or_b32 s4, s2, 1 -; GFX9-NEXT: v_cvt_i32_f32_e32 v6, v6 -; GFX9-NEXT: v_cmp_ge_f32_e64 s[2:3], |v5|, |v3| -; GFX9-NEXT: v_cvt_f32_i32_e32 v3, v1 +; GFX9-NEXT: v_mul_f32_e32 v5, v1, v5 +; GFX9-NEXT: v_trunc_f32_e32 v5, v5 +; GFX9-NEXT: v_mad_f32 v1, -v5, v0, v1 +; GFX9-NEXT: v_cvt_i32_f32_e32 v5, v5 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[2:3], |v1|, |v0| ; GFX9-NEXT: s_and_b64 s[2:3], s[2:3], exec ; GFX9-NEXT: s_cselect_b32 s2, s4, 0 -; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 15 -; GFX9-NEXT: v_add_u32_e32 v5, s2, v6 -; GFX9-NEXT: v_cvt_f32_i32_e32 v6, v0 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v3 -; GFX9-NEXT: v_xor_b32_e32 v0, v0, v1 -; GFX9-NEXT: v_ashrrev_i32_e32 v0, 30, v0 -; GFX9-NEXT: v_or_b32_e32 v0, 1, v0 -; GFX9-NEXT: v_mul_f32_e32 v1, v6, v7 -; GFX9-NEXT: v_trunc_f32_e32 v1, v1 -; GFX9-NEXT: v_cvt_i32_f32_e32 v7, v1 -; GFX9-NEXT: v_mad_f32 v1, -v1, v3, v6 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, |v3| -; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; GFX9-NEXT: v_add_u32_e32 v0, v7, v0 -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v4 -; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v5 +; GFX9-NEXT: v_add_u32_e32 v0, s2, v5 +; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v4 ; 
GFX9-NEXT: v_lshlrev_b64 v[0:1], 30, v[0:1] +; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v3 ; GFX9-NEXT: v_lshlrev_b32_e32 v4, 15, v4 ; GFX9-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX9-NEXT: v_or_b32_e32 v0, v3, v0 @@ -5233,74 +5223,73 @@ define amdgpu_kernel void @srem_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX6-NEXT: s_mov_b32 s3, 0xf000 ; GFX6-NEXT: s_mov_b32 s2, -1 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_bfe_i32 s6, s10, 0xf0000 -; GFX6-NEXT: v_mov_b32_e32 v2, s4 -; GFX6-NEXT: v_alignbit_b32 v2, s5, v2, 30 -; GFX6-NEXT: s_bfe_i32 s5, s4, 0xf0000 -; GFX6-NEXT: v_cvt_f32_i32_e32 v4, s5 -; GFX6-NEXT: v_cvt_f32_i32_e32 v5, s6 -; GFX6-NEXT: s_xor_b32 s5, s6, s5 -; GFX6-NEXT: s_ashr_i32 s5, s5, 30 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v4 ; GFX6-NEXT: s_mov_b32 s0, s8 ; GFX6-NEXT: s_mov_b32 s1, s9 -; GFX6-NEXT: s_lshr_b32 s8, s10, 15 -; GFX6-NEXT: v_mul_f32_e32 v6, v5, v6 -; GFX6-NEXT: v_trunc_f32_e32 v6, v6 -; GFX6-NEXT: v_mad_f32 v5, -v6, v4, v5 -; GFX6-NEXT: v_cvt_i32_f32_e32 v6, v6 -; GFX6-NEXT: s_lshr_b32 s9, s4, 15 +; GFX6-NEXT: s_lshr_b64 s[8:9], s[4:5], 30 +; GFX6-NEXT: s_bfe_i32 s5, s4, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v0, s5 +; GFX6-NEXT: s_bfe_i32 s12, s10, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s12 +; GFX6-NEXT: s_xor_b32 s5, s12, s5 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GFX6-NEXT: s_lshr_b64 s[6:7], s[10:11], 30 +; GFX6-NEXT: s_ashr_i32 s5, s5, 30 +; GFX6-NEXT: s_and_b32 s7, s6, 0x7fff +; GFX6-NEXT: v_mul_f32_e32 v2, v1, v2 +; GFX6-NEXT: v_trunc_f32_e32 v2, v2 +; GFX6-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GFX6-NEXT: v_cvt_i32_f32_e32 v2, v2 +; GFX6-NEXT: s_lshr_b32 s11, s10, 15 +; GFX6-NEXT: s_and_b32 s9, s8, 0x7fff +; GFX6-NEXT: s_lshr_b32 s14, s4, 15 ; GFX6-NEXT: s_or_b32 s5, s5, 1 -; GFX6-NEXT: v_cmp_ge_f32_e64 s[6:7], |v5|, |v4| -; GFX6-NEXT: s_and_b64 s[6:7], s[6:7], exec +; GFX6-NEXT: v_cmp_ge_f32_e64 s[12:13], |v1|, |v0| +; GFX6-NEXT: s_and_b64 s[12:13], s[12:13], exec ; GFX6-NEXT: s_cselect_b32 s5, s5, 0 -; GFX6-NEXT: 
v_add_i32_e32 v4, vcc, s5, v6 -; GFX6-NEXT: v_mul_lo_u32 v4, v4, s4 +; GFX6-NEXT: v_add_i32_e32 v0, vcc, s5, v2 +; GFX6-NEXT: v_mul_lo_u32 v0, v0, s4 ; GFX6-NEXT: s_bfe_i32 s4, s4, 0xf000f -; GFX6-NEXT: v_cvt_f32_i32_e32 v5, s4 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s4 ; GFX6-NEXT: s_bfe_i32 s5, s10, 0xf000f -; GFX6-NEXT: v_cvt_f32_i32_e32 v6, s5 +; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s10, v0 +; GFX6-NEXT: v_cvt_f32_i32_e32 v0, s5 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v3, v1 ; GFX6-NEXT: s_xor_b32 s4, s5, s4 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v7, v5 -; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v2 ; GFX6-NEXT: s_ashr_i32 s4, s4, 30 -; GFX6-NEXT: v_bfe_i32 v2, v2, 0, 15 -; GFX6-NEXT: v_mul_f32_e32 v7, v6, v7 -; GFX6-NEXT: v_trunc_f32_e32 v7, v7 -; GFX6-NEXT: v_mad_f32 v6, -v7, v5, v6 +; GFX6-NEXT: s_or_b32 s10, s4, 1 +; GFX6-NEXT: v_mul_f32_e32 v3, v0, v3 +; GFX6-NEXT: v_trunc_f32_e32 v3, v3 +; GFX6-NEXT: v_mad_f32 v0, -v3, v1, v0 +; GFX6-NEXT: v_cvt_i32_f32_e32 v3, v3 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v0|, |v1| +; GFX6-NEXT: s_and_b64 s[4:5], s[4:5], exec +; GFX6-NEXT: s_cselect_b32 s4, s10, 0 +; GFX6-NEXT: v_add_i32_e32 v0, vcc, s4, v3 +; GFX6-NEXT: s_bfe_i32 s4, s8, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v1, s4 +; GFX6-NEXT: s_bfe_i32 s5, s6, 0xf0000 +; GFX6-NEXT: v_cvt_f32_i32_e32 v3, s5 +; GFX6-NEXT: s_xor_b32 s4, s5, s4 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v4, v1 +; GFX6-NEXT: s_ashr_i32 s4, s4, 30 ; GFX6-NEXT: s_or_b32 s6, s4, 1 -; GFX6-NEXT: v_cvt_i32_f32_e32 v7, v7 -; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v6|, |v5| -; GFX6-NEXT: v_cvt_f32_i32_e32 v6, v2 -; GFX6-NEXT: v_mov_b32_e32 v0, s10 -; GFX6-NEXT: v_alignbit_b32 v0, s11, v0, 30 +; GFX6-NEXT: v_mul_lo_u32 v0, v0, s14 +; GFX6-NEXT: v_mul_f32_e32 v4, v3, v4 +; GFX6-NEXT: v_trunc_f32_e32 v4, v4 +; GFX6-NEXT: v_mad_f32 v3, -v4, v1, v3 +; GFX6-NEXT: v_cvt_i32_f32_e32 v4, v4 +; GFX6-NEXT: v_cmp_ge_f32_e64 s[4:5], |v3|, |v1| ; GFX6-NEXT: s_and_b64 s[4:5], s[4:5], exec -; GFX6-NEXT: v_and_b32_e32 v1, 0x7fff, v0 ; GFX6-NEXT: 
s_cselect_b32 s4, s6, 0 -; GFX6-NEXT: v_bfe_i32 v0, v0, 0, 15 -; GFX6-NEXT: v_add_i32_e32 v5, vcc, s4, v7 -; GFX6-NEXT: v_cvt_f32_i32_e32 v7, v0 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v8, v6 -; GFX6-NEXT: v_xor_b32_e32 v0, v0, v2 -; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s10, v4 -; GFX6-NEXT: v_mul_f32_e32 v2, v7, v8 -; GFX6-NEXT: v_trunc_f32_e32 v2, v2 -; GFX6-NEXT: v_mad_f32 v7, -v2, v6, v7 -; GFX6-NEXT: v_cvt_i32_f32_e32 v2, v2 -; GFX6-NEXT: v_ashrrev_i32_e32 v0, 30, v0 -; GFX6-NEXT: v_or_b32_e32 v0, 1, v0 -; GFX6-NEXT: v_cmp_ge_f32_e64 vcc, |v7|, |v6| -; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; GFX6-NEXT: v_mul_lo_u32 v5, v5, s9 -; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2 -; GFX6-NEXT: v_mul_lo_u32 v0, v0, v3 -; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s8, v5 -; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 -; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v1, v0 +; GFX6-NEXT: v_add_i32_e32 v1, vcc, s4, v4 +; GFX6-NEXT: v_mul_lo_u32 v1, v1, s9 +; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s11, v0 +; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v3 +; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s7, v1 ; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 30 -; GFX6-NEXT: v_and_b32_e32 v3, 0x7fff, v4 -; GFX6-NEXT: v_lshlrev_b32_e32 v2, 15, v2 -; GFX6-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX6-NEXT: v_and_b32_e32 v2, 0x7fff, v2 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, 15, v3 +; GFX6-NEXT: v_or_b32_e32 v2, v3, v2 ; GFX6-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX6-NEXT: s_waitcnt expcnt(0) @@ -5312,78 +5301,77 @@ define amdgpu_kernel void @srem_v3i15(ptr addrspace(1) %out, <3 x i15> %x, <3 x ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 ; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34 -; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: v_mov_b32_e32 v0, s2 -; GFX9-NEXT: v_alignbit_b32 v0, s3, v0, 30 -; GFX9-NEXT: s_bfe_i32 s3, s6, 0xf0000 -; GFX9-NEXT: v_cvt_f32_i32_e32 v4, s3 -; GFX9-NEXT: s_bfe_i32 s4, s2, 0xf0000 -; GFX9-NEXT: v_cvt_f32_i32_e32 v5, s4 
-; GFX9-NEXT: s_xor_b32 s3, s4, s3 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v4 -; GFX9-NEXT: v_mov_b32_e32 v1, s6 -; GFX9-NEXT: s_ashr_i32 s3, s3, 30 -; GFX9-NEXT: s_lshr_b32 s8, s2, 15 -; GFX9-NEXT: v_mul_f32_e32 v6, v5, v6 -; GFX9-NEXT: v_trunc_f32_e32 v6, v6 -; GFX9-NEXT: v_mad_f32 v5, -v6, v4, v5 -; GFX9-NEXT: v_cvt_i32_f32_e32 v6, v6 -; GFX9-NEXT: v_alignbit_b32 v1, s7, v1, 30 -; GFX9-NEXT: s_lshr_b32 s7, s6, 15 -; GFX9-NEXT: s_or_b32 s3, s3, 1 -; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], |v5|, |v4| -; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], exec -; GFX9-NEXT: s_cselect_b32 s3, s3, 0 -; GFX9-NEXT: v_add_u32_e32 v4, s3, v6 -; GFX9-NEXT: s_bfe_i32 s3, s6, 0xf000f -; GFX9-NEXT: v_cvt_f32_i32_e32 v5, s3 -; GFX9-NEXT: s_bfe_i32 s4, s2, 0xf000f -; GFX9-NEXT: v_cvt_f32_i32_e32 v6, s4 -; GFX9-NEXT: s_xor_b32 s3, s4, s3 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v7, v5 -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v1 -; GFX9-NEXT: s_ashr_i32 s3, s3, 30 -; GFX9-NEXT: v_bfe_i32 v1, v1, 0, 15 -; GFX9-NEXT: v_mul_f32_e32 v7, v6, v7 -; GFX9-NEXT: v_trunc_f32_e32 v7, v7 -; GFX9-NEXT: v_mad_f32 v6, -v7, v5, v6 -; GFX9-NEXT: v_cvt_i32_f32_e32 v7, v7 -; GFX9-NEXT: s_or_b32 s3, s3, 1 -; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], |v6|, |v5| -; GFX9-NEXT: v_cvt_f32_i32_e32 v6, v1 +; GFX9-NEXT: s_lshr_b64 s[4:5], s[2:3], 30 +; GFX9-NEXT: s_bfe_i32 s5, s6, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v0, s5 +; GFX9-NEXT: s_lshr_b64 s[8:9], s[6:7], 30 +; GFX9-NEXT: s_bfe_i32 s7, s2, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v1, s7 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GFX9-NEXT: s_xor_b32 s5, s7, s5 +; GFX9-NEXT: s_ashr_i32 s5, s5, 30 +; GFX9-NEXT: s_lshr_b32 s3, s2, 15 +; GFX9-NEXT: v_mul_f32_e32 v2, v1, v2 +; GFX9-NEXT: v_trunc_f32_e32 v2, v2 +; GFX9-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GFX9-NEXT: v_cvt_i32_f32_e32 v2, v2 +; GFX9-NEXT: s_and_b32 s9, s4, 0x7fff +; GFX9-NEXT: s_and_b32 s12, s8, 0x7fff +; GFX9-NEXT: s_lshr_b32 s13, s6, 15 +; GFX9-NEXT: s_or_b32 s5, s5, 1 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[10:11], |v1|, 
|v0| +; GFX9-NEXT: s_and_b64 s[10:11], s[10:11], exec +; GFX9-NEXT: s_cselect_b32 s5, s5, 0 +; GFX9-NEXT: v_add_u32_e32 v0, s5, v2 +; GFX9-NEXT: s_bfe_i32 s5, s6, 0xf000f +; GFX9-NEXT: v_cvt_f32_i32_e32 v1, s5 +; GFX9-NEXT: v_mul_lo_u32 v0, v0, s6 +; GFX9-NEXT: s_bfe_i32 s6, s2, 0xf000f +; GFX9-NEXT: v_cvt_f32_i32_e32 v2, s6 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v1 +; GFX9-NEXT: s_xor_b32 s5, s6, s5 +; GFX9-NEXT: s_ashr_i32 s5, s5, 30 +; GFX9-NEXT: s_or_b32 s5, s5, 1 +; GFX9-NEXT: v_mul_f32_e32 v3, v2, v3 +; GFX9-NEXT: v_trunc_f32_e32 v3, v3 +; GFX9-NEXT: v_mad_f32 v2, -v3, v1, v2 +; GFX9-NEXT: v_cvt_i32_f32_e32 v3, v3 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[6:7], |v2|, |v1| +; GFX9-NEXT: s_and_b64 s[6:7], s[6:7], exec +; GFX9-NEXT: s_cselect_b32 s5, s5, 0 +; GFX9-NEXT: v_add_u32_e32 v1, s5, v3 +; GFX9-NEXT: s_bfe_i32 s5, s8, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v2, s5 +; GFX9-NEXT: s_bfe_i32 s4, s4, 0xf0000 +; GFX9-NEXT: v_cvt_f32_i32_e32 v3, s4 +; GFX9-NEXT: s_xor_b32 s4, s4, s5 +; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v2 +; GFX9-NEXT: s_ashr_i32 s4, s4, 30 +; GFX9-NEXT: s_or_b32 s6, s4, 1 +; GFX9-NEXT: v_mul_lo_u32 v1, v1, s13 +; GFX9-NEXT: v_mul_f32_e32 v4, v3, v4 +; GFX9-NEXT: v_trunc_f32_e32 v4, v4 +; GFX9-NEXT: v_mad_f32 v3, -v4, v2, v3 +; GFX9-NEXT: v_cvt_i32_f32_e32 v4, v4 +; GFX9-NEXT: v_cmp_ge_f32_e64 s[4:5], |v3|, |v2| ; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], exec -; GFX9-NEXT: s_cselect_b32 s3, s3, 0 -; GFX9-NEXT: v_add_u32_e32 v5, s3, v7 -; GFX9-NEXT: v_bfe_i32 v7, v0, 0, 15 -; GFX9-NEXT: v_cvt_f32_i32_e32 v8, v7 -; GFX9-NEXT: v_rcp_iflag_f32_e32 v9, v6 -; GFX9-NEXT: v_xor_b32_e32 v1, v7, v1 -; GFX9-NEXT: v_ashrrev_i32_e32 v1, 30, v1 -; GFX9-NEXT: v_or_b32_e32 v1, 1, v1 -; GFX9-NEXT: v_mul_f32_e32 v7, v8, v9 -; GFX9-NEXT: v_trunc_f32_e32 v7, v7 -; GFX9-NEXT: v_cvt_i32_f32_e32 v9, v7 -; GFX9-NEXT: v_mad_f32 v7, -v7, v6, v8 -; GFX9-NEXT: v_cmp_ge_f32_e64 vcc, |v7|, |v6| -; GFX9-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc -; GFX9-NEXT: v_mul_lo_u32 v4, v4, s6 -; 
GFX9-NEXT: v_mul_lo_u32 v5, v5, s7 -; GFX9-NEXT: v_add_u32_e32 v1, v9, v1 -; GFX9-NEXT: v_mul_lo_u32 v1, v1, v3 -; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX9-NEXT: v_sub_u32_e32 v3, s2, v4 -; GFX9-NEXT: v_sub_u32_e32 v4, s8, v5 -; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1 -; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v4 +; GFX9-NEXT: s_cselect_b32 s4, s6, 0 +; GFX9-NEXT: v_add_u32_e32 v2, s4, v4 +; GFX9-NEXT: v_mul_lo_u32 v2, v2, s12 +; GFX9-NEXT: v_sub_u32_e32 v4, s2, v0 +; GFX9-NEXT: v_sub_u32_e32 v5, s3, v1 +; GFX9-NEXT: v_mov_b32_e32 v3, 0 +; GFX9-NEXT: v_sub_u32_e32 v0, s9, v2 +; GFX9-NEXT: v_and_b32_e32 v2, 0x7fff, v4 +; GFX9-NEXT: v_and_b32_e32 v4, 0x7fff, v5 ; GFX9-NEXT: v_lshlrev_b64 v[0:1], 30, v[0:1] -; GFX9-NEXT: v_and_b32_e32 v3, 0x7fff, v3 ; GFX9-NEXT: v_lshlrev_b32_e32 v4, 15, v4 -; GFX9-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX9-NEXT: v_or_b32_e32 v0, v3, v0 -; GFX9-NEXT: global_store_dword v2, v0, s[0:1] +; GFX9-NEXT: v_or_b32_e32 v2, v2, v4 +; GFX9-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX9-NEXT: global_store_dword v3, v0, s[0:1] ; GFX9-NEXT: v_and_b32_e32 v0, 0x1fff, v1 -; GFX9-NEXT: global_store_short v2, v0, s[0:1] offset:4 +; GFX9-NEXT: global_store_short v3, v0, s[0:1] offset:4 ; GFX9-NEXT: s_endpgm %r = srem <3 x i15> %x, %y store <3 x i15> %r, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index 0490e5a19b4b7..94ba5cdd09df4 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -10908,12 +10908,13 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) { ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v1, 0x7060302 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fadd_v2bf16: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v1 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; 
GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v1 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %op = fadd <2 x bfloat> %a, %b ret <2 x bfloat> %op } @@ -11446,13 +11447,14 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) { ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11FAKE16-NEXT: v_perm_b32 v1, v1, v4, 0x7060302 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fadd_v4bf16: ; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v2 -; GFX1250-NEXT: v_pk_add_bf16 v1, v1, v3 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v2 +; GFX1250-NEXT: v_pk_add_bf16 v1, v1, v3 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %op = fadd <4 x bfloat> %a, %b ret <4 x bfloat> %op } @@ -49991,6 +49993,622 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> ret <4 x bfloat> %op } +define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) { +; GCN-LABEL: v_fma_v8bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; 
GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_fma_f32 v7, v7, v15, v23 +; GCN-NEXT: v_fma_f32 v6, v6, v14, v22 +; GCN-NEXT: v_fma_f32 v5, v5, v13, v21 +; GCN-NEXT: v_fma_f32 v4, v4, v12, v20 +; GCN-NEXT: v_fma_f32 v3, v3, v11, v19 +; GCN-NEXT: v_fma_f32 v2, v2, v10, v18 +; GCN-NEXT: v_fma_f32 v1, v1, v9, v17 +; GCN-NEXT: v_fma_f32 v0, v0, v8, v16 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 
0xffff0000, v7 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_fma_v8bf16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_fma_f32 v6, v6, v14, v15 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_fma_f32 v5, v5, v13, v14 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_fma_f32 v4, v4, v12, v13 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_fma_f32 v3, v3, v11, v12 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_fma_f32 v2, 
v2, v10, v11 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v17 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v16 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_fma_f32 v1, v1, v9, v11 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v10 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_fma_f32 v0, v0, v8, v9 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v8bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX8-NEXT: v_fma_f32 v12, v14, v13, v12 +; GFX8-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v12 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: v_add_u32_e32 v13, vcc, s4, v13 +; GFX8-NEXT: v_fma_f32 v3, v3, v7, v11 +; GFX8-NEXT: v_or_b32_e32 v14, 0x400000, v12 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_bfe_u32 v7, v3, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v13, v14, vcc +; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v3 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v3 +; GFX8-NEXT: 
v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v11, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v6 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v2 +; GFX8-NEXT: v_fma_f32 v7, v13, v11, v7 +; GFX8-NEXT: v_bfe_u32 v11, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v7 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: v_add_u32_e32 v11, vcc, s4, v11 +; GFX8-NEXT: v_fma_f32 v2, v2, v6, v10 +; GFX8-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_bfe_u32 v6, v2, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v11, v13, vcc +; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v2 +; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v2 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; GFX8-NEXT: v_fma_f32 v6, v11, v10, v6 +; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6 +; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: v_add_u32_e32 v10, vcc, s4, v10 +; GFX8-NEXT: v_fma_f32 v1, v1, v5, v9 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1 +; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 +; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v1 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; GFX8-NEXT: v_fma_f32 
v5, v10, v9, v5 +; GFX8-NEXT: v_bfe_u32 v9, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v5 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9 +; GFX8-NEXT: v_fma_f32 v0, v0, v4, v8 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc +; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4 +; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v0 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v5, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v6, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v7, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v12, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v8bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX900-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX900-NEXT: v_fma_f32 v12, v14, v13, v12 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_fma_f32 v3, v3, v7, v11 +; GFX900-NEXT: v_add3_u32 v13, v13, v12, s4 +; GFX900-NEXT: v_or_b32_e32 v14, 0x400000, v12 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_bfe_u32 v7, v3, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v13, v14, vcc +; GFX900-NEXT: v_add3_u32 v7, v7, v3, s4 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v3 +; 
GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v7, v11, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v6 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v2 +; GFX900-NEXT: v_fma_f32 v7, v13, v11, v7 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX900-NEXT: v_bfe_u32 v11, v7, 16, 1 +; GFX900-NEXT: v_fma_f32 v2, v2, v6, v10 +; GFX900-NEXT: v_add3_u32 v11, v11, v7, s4 +; GFX900-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_bfe_u32 v6, v2, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v11, v13, vcc +; GFX900-NEXT: v_add3_u32 v6, v6, v2, s4 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v2 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v5 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v1 +; GFX900-NEXT: v_fma_f32 v6, v11, v10, v6 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX900-NEXT: v_fma_f32 v1, v1, v5, v9 +; GFX900-NEXT: v_add3_u32 v10, v10, v6, s4 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v6 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_bfe_u32 v5, v1, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc +; GFX900-NEXT: v_add3_u32 v5, v5, v1, s4 +; GFX900-NEXT: v_or_b32_e32 v9, 0x400000, v1 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX900-NEXT: v_lshlrev_b32_e32 v9, 16, v4 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; GFX900-NEXT: v_fma_f32 v5, v10, v9, v5 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: v_and_b32_e32 v4, 
0xffff0000, v4 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX900-NEXT: v_bfe_u32 v9, v5, 16, 1 +; GFX900-NEXT: v_fma_f32 v0, v0, v4, v8 +; GFX900-NEXT: v_add3_u32 v9, v9, v5, s4 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX900-NEXT: v_bfe_u32 v4, v0, 16, 1 +; GFX900-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc +; GFX900-NEXT: v_add3_u32 v4, v4, v0, s4 +; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v0 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v5, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v6, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v7, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v12, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v8bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v7 +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v3 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX950-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v10 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v6 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v13, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v9 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v10, v6 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: 
v_fmac_f32_e32 v6, v1, v5 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v1, v9, v5 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v8 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v5, v0, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v5, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v6, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v7, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v11, v12 +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v8bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX10-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v2 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v10 +; GFX10-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v12 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX10-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v3, v14, v7 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v10 +; GFX10-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v1 +; GFX10-NEXT: v_cndmask_b32_e32 v10, v13, v15, vcc_lo +; GFX10-NEXT: v_bfe_u32 v13, v3, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX10-NEXT: v_add3_u32 v12, v16, v11, 0x7fff +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v9 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; 
GFX10-NEXT: v_add3_u32 v13, v13, v3, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v3 +; GFX10-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_fmac_f32_e32 v2, v14, v6 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_add3_u32 v6, v16, v7, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v3, v13, v15, vcc_lo +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v8 +; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v4 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX10-NEXT: v_bfe_u32 v14, v2, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX10-NEXT: v_fmac_f32_e32 v9, v1, v5 +; GFX10-NEXT: v_fmac_f32_e32 v15, v18, v16 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v8, v0, v4 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc_lo +; GFX10-NEXT: v_add3_u32 v0, v14, v2, 0x7fff +; GFX10-NEXT: v_bfe_u32 v4, v9, 16, 1 +; GFX10-NEXT: v_bfe_u32 v5, v15, 16, 1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_bfe_u32 v7, v8, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v9 +; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX10-NEXT: v_add3_u32 v2, v5, v15, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX10-NEXT: v_add3_u32 v0, v4, v9, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v15 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX10-NEXT: v_add3_u32 v5, v7, v8, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v8 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v5, v0, v13, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX10-NEXT: 
v_perm_b32 v0, v4, v2, 0x7060302 +; GFX10-NEXT: v_perm_b32 v2, v6, v3, 0x7060302 +; GFX10-NEXT: v_perm_b32 v1, v5, v1, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v12, v17, vcc_lo +; GFX10-NEXT: v_perm_b32 v3, v7, v10, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v8bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v10 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v7, v2, v6 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v12, v14, v13 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_bfe_u32 v10, v11, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11TRUE16-NEXT: 
v_bfe_u32 v13, v7, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v14, v16, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v10, v11, 0x7fff +; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v5 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7 +; GFX11TRUE16-NEXT: v_bfe_u32 v15, v14, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v2, v6, vcc_lo +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_add3_u32 v10, v15, v14, 0x7fff +; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v14 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v4 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v6.h +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v9, v1, v5 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v10, v12, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v10, v13, v7, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v8 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v4, v10, v11 :: v_dual_and_b32 v5, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v9, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc_lo +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v15, v17, v16 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: s_delay_alu 
instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_bfe_u32 v12, v15, 16, 1 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v13, v16, v14 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v5, v0, v1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v8, v12, v15, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v13, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v13 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v13, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v9, v11, v5, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v0, v12, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v9, v10, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v8bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v13, 16, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v12, v14, v13 :: v_dual_and_b32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v2 +; GFX11FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v13, v12, 16, 1 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v11, v3, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v6 +; GFX11FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11FAKE16-NEXT: v_add3_u32 v13, v13, v12, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v10 +; GFX11FAKE16-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v3, v14, v7 +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v10, v13, v15 :: v_dual_and_b32 v7, 0xffff0000, v10 +; GFX11FAKE16-NEXT: v_add3_u32 v12, v16, v11, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v1 +; GFX11FAKE16-NEXT: v_bfe_u32 v13, v3, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v3 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_add3_u32 v13, v13, v3, 0x7fff +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v3, v13, v15 :: v_dual_and_b32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v7, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v8 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v9 +; GFX11FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v14, v6 :: v_dual_and_b32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 
vcc_lo, v7, v7 +; GFX11FAKE16-NEXT: v_add3_u32 v6, v16, v7, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v9, v1, v5 :: v_dual_and_b32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v14, v2, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v16, 16, v4 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v9 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v8, v0, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v14, v2, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v4, v9, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v15, v18, v16 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v0, v4, v9, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v5, v15, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v15 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_add3_u32 v2, v5, v15, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v5, v7, v8, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v5, v0, 
v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11FAKE16-NEXT: v_perm_b32 v0, v4, v2, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v2, v6, v3, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_perm_b32 v1, v5, v1, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v7, v12, v17, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v3, v7, v10, 0x7060302 +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; ; GFX1250-LABEL: v_fma_v8bf16: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 @@ -50000,85 +50618,4043 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> ; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v6, v10 ; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v7, v11 ; GFX1250-NEXT: s_set_pc_i64 s[30:31] -define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) { %op = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) ret <8 x bfloat> %op } -; GFX1250-LABEL: v_fma_v16bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v8, v16 -; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v9, v17 -; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v10, v18 -; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v11, v19 -; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v12, v20 -; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v13, v21 -; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v14, v22 -; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v15, v23 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] define <16 x bfloat> @v_fma_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) { - %op = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) - ret <16 x bfloat> %op -} - -; GFX1250-LABEL: v_fma_v32bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_clause 0x10 -; GFX1250-NEXT: scratch_load_b32 v31, off, s32 
offset:64 -; GFX1250-NEXT: scratch_load_b32 v32, off, s32 offset:4 -; GFX1250-NEXT: scratch_load_b32 v33, off, s32 offset:8 -; GFX1250-NEXT: scratch_load_b32 v34, off, s32 offset:12 -; GFX1250-NEXT: scratch_load_b32 v35, off, s32 offset:16 -; GFX1250-NEXT: scratch_load_b32 v36, off, s32 offset:20 -; GFX1250-NEXT: scratch_load_b32 v37, off, s32 offset:24 -; GFX1250-NEXT: scratch_load_b32 v38, off, s32 offset:28 -; GFX1250-NEXT: scratch_load_b32 v39, off, s32 offset:32 -; GFX1250-NEXT: scratch_load_b32 v48, off, s32 offset:36 -; GFX1250-NEXT: scratch_load_b32 v49, off, s32 offset:40 -; GFX1250-NEXT: scratch_load_b32 v50, off, s32 offset:44 -; GFX1250-NEXT: scratch_load_b32 v51, off, s32 offset:48 -; GFX1250-NEXT: scratch_load_b32 v52, off, s32 offset:52 -; GFX1250-NEXT: scratch_load_b32 v53, off, s32 offset:56 -; GFX1250-NEXT: scratch_load_b32 v54, off, s32 offset:60 -; GFX1250-NEXT: scratch_load_b32 v55, off, s32 -; GFX1250-NEXT: s_wait_loadcnt 0xf -; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v16, v32 -; GFX1250-NEXT: s_wait_loadcnt 0xe -; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v17, v33 -; GFX1250-NEXT: s_wait_loadcnt 0xd -; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v18, v34 -; GFX1250-NEXT: s_wait_loadcnt 0xc -; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v19, v35 -; GFX1250-NEXT: s_wait_loadcnt 0xb -; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v20, v36 -; GFX1250-NEXT: s_wait_loadcnt 0xa -; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v21, v37 -; GFX1250-NEXT: s_wait_loadcnt 0x9 -; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v22, v38 -; GFX1250-NEXT: s_wait_loadcnt 0x8 -; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v23, v39 -; GFX1250-NEXT: s_wait_loadcnt 0x7 -; GFX1250-NEXT: v_pk_fma_bf16 v8, v8, v24, v48 -; GFX1250-NEXT: s_wait_loadcnt 0x6 -; GFX1250-NEXT: v_pk_fma_bf16 v9, v9, v25, v49 -; GFX1250-NEXT: s_wait_loadcnt 0x5 -; GFX1250-NEXT: v_pk_fma_bf16 v10, v10, v26, v50 -; GFX1250-NEXT: s_wait_loadcnt 0x4 -; GFX1250-NEXT: v_pk_fma_bf16 v11, v11, v27, v51 -; GFX1250-NEXT: s_wait_loadcnt 0x3 -; GFX1250-NEXT: 
v_pk_fma_bf16 v12, v12, v28, v52 -; GFX1250-NEXT: s_wait_loadcnt 0x2 -; GFX1250-NEXT: v_pk_fma_bf16 v13, v13, v29, v53 -; GFX1250-NEXT: s_wait_loadcnt 0x1 -; GFX1250-NEXT: v_pk_fma_bf16 v14, v14, v30, v54 -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_fma_bf16 v15, v15, v55, v31 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] -define <32 x bfloat> @v_fma_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) { +; GCN-LABEL: v_fma_v16bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v15, v15, v31, v32 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60 +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v14, v14, v30, v31 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:56 +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_fma_f32 v13, v13, v29, v30 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 
0xffff0000, v28 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_fma_f32 v12, v12, v28, v29 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48 +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: v_fma_f32 v11, v11, v27, v28 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_fma_f32 v10, v10, v26, v27 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GCN-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_fma_f32 v9, v9, v25, v26 +; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_fma_f32 v8, v8, v24, v25 +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:32 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: 
v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_fma_f32 v7, v7, v23, v24 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_fma_f32 v6, v6, v22, v23 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_fma_f32 v5, v5, v21, v22 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:20 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_fma_f32 v4, v4, v20, v21 +; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_fma_f32 v3, v3, v19, v20 +; GCN-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:12 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: 
buffer_load_dword v20, off, s[0:3], s32 offset:4 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_fma_f32 v2, v2, v18, v19 +; GCN-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:8 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v20 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_fma_f32 v1, v1, v17, v18 +; GCN-NEXT: v_fma_f32 v0, v0, v16, v19 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_fma_v16bf16: +; GFX7: ; %bb.0: +; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GFX7-NEXT: 
buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: 
v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v15, v15, v31, v32 +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v14, v14, v30, v31 +; GFX7-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:56 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_fma_f32 v13, v13, v29, v30 +; GFX7-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_fma_f32 v12, v12, v28, v29 +; GFX7-NEXT: buffer_load_dword 
v28, off, s[0:3], s32 offset:48 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_fma_f32 v11, v11, v27, v28 +; GFX7-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_fma_f32 v10, v10, v26, v27 +; GFX7-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_fma_f32 v9, v9, v25, v26 +; GFX7-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_fma_f32 v8, v8, v24, v25 +; GFX7-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:32 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_fma_f32 v7, v7, v23, v24 +; GFX7-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_fma_f32 v6, v6, v22, v23 +; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_fma_f32 v5, v5, v21, v22 +; GFX7-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:20 +; GFX7-NEXT: 
v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_fma_f32 v4, v4, v20, v21 +; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_fma_f32 v3, v3, v19, v20 +; GFX7-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:12 +; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:4 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_fma_f32 v2, v2, v18, v19 +; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:8 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_fma_f32 v1, v1, v17, v18 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v19 +; GFX7-NEXT: v_fma_f32 v0, v0, v16, v17 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v16bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: v_fma_f32 v24, v26, v25, v24 +; GFX8-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v14 +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v6 +; GFX8-NEXT: 
v_and_b32_e32 v22, 0xffff0000, v22 +; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: v_fma_f32 v15, v25, v23, v15 +; GFX8-NEXT: v_fma_f32 v6, v6, v14, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v13 +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: v_fma_f32 v14, v23, v22, v14 +; GFX8-NEXT: v_fma_f32 v5, v5, v13, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v12 +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: v_fma_f32 v13, v22, v21, v13 +; GFX8-NEXT: v_fma_f32 v4, v4, v12, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: v_fma_f32 v12, v21, v20, v12 +; GFX8-NEXT: v_fma_f32 v3, v3, v11, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v10 +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v2 +; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: v_fma_f32 v11, v20, v19, v11 +; GFX8-NEXT: v_fma_f32 v2, v2, v10, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v1 +; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: v_fma_f32 v10, v19, v18, v10 +; GFX8-NEXT: 
v_fma_f32 v1, v1, v9, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v8 +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: v_fma_f32 v0, v0, v8, v16 +; GFX8-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v24 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: v_add_u32_e32 v8, vcc, s4, v8 +; GFX8-NEXT: v_or_b32_e32 v16, 0x400000, v24 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX8-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc +; GFX8-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v7 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_fma_f32 v9, v18, v17, v9 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v7 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v15 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v15 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v6 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v6 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v14, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v14 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v14 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX8-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v5 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v5 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; 
GFX8-NEXT: v_cndmask_b32_e32 v5, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v13, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v13 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v13 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX8-NEXT: v_cndmask_b32_e32 v13, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v4, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v4 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v4 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX8-NEXT: v_cndmask_b32_e32 v4, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v12 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v3 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v3 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v11 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX8-NEXT: v_cndmask_b32_e32 v11, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v2, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v2 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v2 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v10, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v10 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v10 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX8-NEXT: v_cndmask_b32_e32 v10, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v1, 16, 1 +; GFX8-NEXT: 
v_add_u32_e32 v16, vcc, v16, v1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v1 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v9, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v9 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX8-NEXT: v_cndmask_b32_e32 v9, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v16, v0, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v0 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v0 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v9, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v10, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v11, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v12, 16 +; GFX8-NEXT: v_alignbit_b32 v4, v4, v13, 16 +; GFX8-NEXT: v_alignbit_b32 v5, v5, v14, 16 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v15, 16 +; GFX8-NEXT: v_alignbit_b32 v7, v7, v8, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v16bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX900-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX900-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX900-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: v_fma_f32 v24, v26, v25, v24 +; GFX900-NEXT: v_fma_f32 v7, v7, v15, v23 +; GFX900-NEXT: 
v_lshlrev_b32_e32 v15, 16, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v14 +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v6 +; GFX900-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX900-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: v_fma_f32 v15, v25, v23, v15 +; GFX900-NEXT: v_fma_f32 v6, v6, v14, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v13 +; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX900-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX900-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: v_fma_f32 v14, v23, v22, v14 +; GFX900-NEXT: v_fma_f32 v5, v5, v13, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v12 +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v4 +; GFX900-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX900-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX900-NEXT: v_fma_f32 v13, v22, v21, v13 +; GFX900-NEXT: v_fma_f32 v4, v4, v12, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v11 +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v3 +; GFX900-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: v_fma_f32 v12, v21, v20, v12 +; GFX900-NEXT: v_fma_f32 v3, v3, v11, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v10 +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v2 +; GFX900-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX900-NEXT: v_fma_f32 v11, v20, v19, v11 +; GFX900-NEXT: v_fma_f32 v2, v2, v10, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v9 +; GFX900-NEXT: 
v_lshlrev_b32_e32 v19, 16, v1 +; GFX900-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: v_fma_f32 v10, v19, v18, v10 +; GFX900-NEXT: v_fma_f32 v1, v1, v9, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX900-NEXT: v_lshlrev_b32_e32 v17, 16, v8 +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v0 +; GFX900-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX900-NEXT: v_fma_f32 v0, v0, v8, v16 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX900-NEXT: v_add3_u32 v8, v8, v24, s4 +; GFX900-NEXT: v_or_b32_e32 v16, 0x400000, v24 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX900-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc +; GFX900-NEXT: v_bfe_u32 v16, v7, 16, 1 +; GFX900-NEXT: v_fma_f32 v9, v18, v17, v9 +; GFX900-NEXT: v_add3_u32 v16, v16, v7, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v7 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v15, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v15, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v15 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX900-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v6, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v6, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v6 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v14, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v14, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v14 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX900-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v5, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v5, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v5 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, 
v5, v5 +; GFX900-NEXT: v_cndmask_b32_e32 v5, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v13, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v13, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v13 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX900-NEXT: v_cndmask_b32_e32 v13, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v4, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v4, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX900-NEXT: v_cndmask_b32_e32 v4, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v12, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v12, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v12 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v3, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v3, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v3 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v11, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v11, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v11 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX900-NEXT: v_cndmask_b32_e32 v11, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v2, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v2, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v2 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v10, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v10, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v10 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX900-NEXT: v_cndmask_b32_e32 v10, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v1, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v1, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v1 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v9, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v9, s4 +; 
GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX900-NEXT: v_cndmask_b32_e32 v9, v16, v17, vcc +; GFX900-NEXT: v_bfe_u32 v16, v0, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v0, s4 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v0 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v9, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v10, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v11, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v12, s4 +; GFX900-NEXT: v_perm_b32 v4, v4, v13, s4 +; GFX900-NEXT: v_perm_b32 v5, v5, v14, s4 +; GFX900-NEXT: v_perm_b32 v6, v6, v15, s4 +; GFX900-NEXT: v_perm_b32 v7, v7, v8, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v16bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v15 +; GFX950-NEXT: v_and_b32_e32 v26, 0xffff0000, v7 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX950-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v22 +; GFX950-NEXT: v_and_b32_e32 v15, 0xffff0000, v14 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v6 +; GFX950-NEXT: v_fmac_f32_e32 v7, v25, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v15, 16, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_fmac_f32_e32 v15, v6, v14 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v21 +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v13 +; GFX950-NEXT: v_and_b32_e32 v22, 0xffff0000, v5 +; GFX950-NEXT: v_fmac_f32_e32 v6, v22, v14 +; GFX950-NEXT: v_lshlrev_b32_e32 v14, 16, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; 
GFX950-NEXT: v_fmac_f32_e32 v14, v5, v13 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v20 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v12 +; GFX950-NEXT: v_and_b32_e32 v21, 0xffff0000, v4 +; GFX950-NEXT: v_fmac_f32_e32 v5, v21, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v13, 16, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_fmac_f32_e32 v13, v4, v12 +; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v19 +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v11 +; GFX950-NEXT: v_and_b32_e32 v20, 0xffff0000, v3 +; GFX950-NEXT: v_fmac_f32_e32 v4, v20, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v12, 16, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v12, v3, v11 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v18 +; GFX950-NEXT: v_and_b32_e32 v11, 0xffff0000, v10 +; GFX950-NEXT: v_and_b32_e32 v19, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v19, v11 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v11, v2, v10 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v17 +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v9 +; GFX950-NEXT: v_and_b32_e32 v18, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v18, v10 +; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v10, v1, v9 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v16 +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v8 +; GFX950-NEXT: v_and_b32_e32 v17, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v1, v17, v9 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v16 +; GFX950-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v9, v0, v8 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v9, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 
v1, v10, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v11, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v12, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v13, v5 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v5, v14, v6 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v15, v7 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v7, v23, v24 +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v16bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v6 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v22 +; GFX10-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v14 +; GFX10-NEXT: v_or_b32_e32 v27, 0x400000, v24 +; GFX10-NEXT: v_bfe_u32 v28, v23, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX10-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v7, v26, v15 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v22 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX10-NEXT: v_add3_u32 v24, v28, v23, 0x7fff +; GFX10-NEXT: v_bfe_u32 v26, v7, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v15, v6, v14 +; GFX10-NEXT: v_cndmask_b32_e32 v22, v25, v27, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v21 +; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v13 +; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v5 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_fmac_f32_e32 v6, v27, v14 +; GFX10-NEXT: v_cndmask_b32_e32 v23, v24, v25, vcc_lo +; GFX10-NEXT: v_add3_u32 
v24, v26, v7, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v7 +; GFX10-NEXT: v_bfe_u32 v26, v15, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX10-NEXT: v_add3_u32 v21, v26, v15, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v14, v5, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v24, v25, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v15 +; GFX10-NEXT: v_bfe_u32 v25, v6, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v12 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v4 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_fmac_f32_e32 v5, v26, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v15, v21, v24, vcc_lo +; GFX10-NEXT: v_add3_u32 v21, v25, v6, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v6 +; GFX10-NEXT: v_bfe_u32 v25, v14, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v20 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v2 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_add3_u32 v20, v25, v14, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v13, v4, v12 +; GFX10-NEXT: v_cndmask_b32_e32 v6, v21, v24, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v14 +; GFX10-NEXT: v_bfe_u32 v24, v5, 16, 1 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_fmac_f32_e32 v4, v25, v12 +; GFX10-NEXT: v_cndmask_b32_e32 v14, v20, v21, vcc_lo +; GFX10-NEXT: v_add3_u32 v20, v24, v5, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v5 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v18 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v10 +; 
GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX10-NEXT: v_bfe_u32 v24, v13, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v12, v3, v11 +; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX10-NEXT: v_fmac_f32_e32 v19, v26, v25 +; GFX10-NEXT: v_cndmask_b32_e32 v5, v20, v21, vcc_lo +; GFX10-NEXT: v_bfe_u32 v20, v4, 16, 1 +; GFX10-NEXT: v_add3_u32 v21, v24, v13, 0x7fff +; GFX10-NEXT: v_bfe_u32 v24, v12, 16, 1 +; GFX10-NEXT: v_bfe_u32 v25, v19, 16, 1 +; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX10-NEXT: v_add3_u32 v11, v20, v4, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v4 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX10-NEXT: v_or_b32_e32 v26, 0x400000, v19 +; GFX10-NEXT: v_fmac_f32_e32 v18, v2, v10 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v17 +; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v11, v20, vcc_lo +; GFX10-NEXT: v_add3_u32 v11, v24, v12, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v12 +; GFX10-NEXT: v_add3_u32 v24, v25, v19, 0x7fff +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_fmac_f32_e32 v2, v25, v10 +; GFX10-NEXT: v_cndmask_b32_e32 v11, v11, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v8 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_bfe_u32 v20, v2, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v17, v1, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v10, v24, v26, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v16 +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v0 +; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_add3_u32 v1, v20, v2, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v2 +; GFX10-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; 
GFX10-NEXT: v_fmac_f32_e32 v16, v0, v8 +; GFX10-NEXT: v_bfe_u32 v0, v17, 16, 1 +; GFX10-NEXT: v_bfe_u32 v27, v18, 16, 1 +; GFX10-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo +; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v17 +; GFX10-NEXT: v_add3_u32 v0, v0, v17, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX10-NEXT: v_bfe_u32 v2, v16, 16, 1 +; GFX10-NEXT: v_add3_u32 v8, v8, v24, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v24 +; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v16 +; GFX10-NEXT: v_cndmask_b32_e32 v9, v0, v9, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX10-NEXT: v_add3_u32 v2, v2, v16, 0x7fff +; GFX10-NEXT: v_add3_u32 v12, v27, v18, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v18 +; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v13 +; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16 +; GFX10-NEXT: v_perm_b32 v1, v9, v1, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v25, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 +; GFX10-NEXT: v_perm_b32 v0, v2, v0, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v8, v12, v19, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX10-NEXT: v_perm_b32 v2, v8, v10, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v12, v21, v3, vcc_lo +; GFX10-NEXT: v_perm_b32 v3, v11, v4, 0x7060302 +; GFX10-NEXT: v_perm_b32 v4, v12, v5, 0x7060302 +; GFX10-NEXT: v_perm_b32 v5, v14, v6, 0x7060302 +; GFX10-NEXT: v_perm_b32 v6, v15, v7, 0x7060302 +; GFX10-NEXT: v_perm_b32 v7, v23, v22, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v16bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, 
v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v15 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v24, v26, v25 :: v_dual_lshlrev_b32 v7, 16, v7 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v22 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v26, v28, v27 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v22, v6, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v13 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX11TRUE16-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v24 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v23, v7, v15 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v25, v29, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v5 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v15, v23, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v24, v26, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX11TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v26 +; GFX11TRUE16-NEXT: v_add3_u32 v15, v15, v23, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v24, v24, v26, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v22, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v14, v29, v28 :: v_dual_cndmask_b32 v15, v15, v25 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v12 +; 
GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v24, v27, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v23, v22, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v22 +; GFX11TRUE16-NEXT: v_bfe_u32 v28, v14, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v20, v4, v12 +; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v14 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v21, v5, v13 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v23, v27, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v5, v28, v14, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v21, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v27, v20, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v24 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v22, vcc_lo +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v14, v23, v21, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v25, v24, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v21, v27, v20, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v14, v22, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v20 +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 
0xffff0000, v18 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v14.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v12, v25, v4 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v23, v26, vcc_lo +; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v12, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v20, v21, v22 :: v_dual_and_b32 v25, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_add3_u32 v21, v23, v12, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v24, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v12 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v20.h +; GFX11TRUE16-NEXT: v_add3_u32 v12, v23, v24, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v19, v3, v11 +; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v3, v21, v22 :: v_dual_and_b32 v22, 0xffff0000, v17 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v18 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX11TRUE16-NEXT: v_bfe_u32 v18, v19, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v22, v25, v23 :: v_dual_fmac_f32 v11, v2, v10 +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v19 +; GFX11TRUE16-NEXT: 
v_add3_u32 v2, v18, v19, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v8 +; GFX11TRUE16-NEXT: v_bfe_u32 v21, v11, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v17, v1, v9 :: v_dual_cndmask_b32 v10, v2, v10 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v16 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v12, v18, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v12, v21, v11, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v16 +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v11 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11 +; GFX11TRUE16-NEXT: v_bfe_u32 v11, v17, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v21, v24, v23 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v9, v0, v1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v12, v18, vcc_lo +; GFX11TRUE16-NEXT: v_add3_u32 v11, v11, v17, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v17 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v21, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX11TRUE16-NEXT: v_add3_u32 v12, v19, v22, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v18, v9, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v21 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v21, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v11, v16, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v22 +; GFX11TRUE16-NEXT: v_add3_u32 v16, v18, v9, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v9 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v8.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v0, v19, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, 
v10.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v12, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v11.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v18.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v16bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v24, 16, v23 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v15 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v7 +; GFX11FAKE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v24, v26, v25 :: v_dual_and_b32 v23, 0xffff0000, v23 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v23, v7, v15 :: v_dual_lshlrev_b32 v26, 16, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v14 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v24, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11FAKE16-NEXT: v_bfe_u32 v28, v23, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX11FAKE16-NEXT: v_add3_u32 v25, v25, v24, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v22 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v24, v28, v23, 0x7fff +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v7, v26, v15 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v22, v25, v27 :: v_dual_and_b32 v15, 0xffff0000, v22 +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23 +; GFX11FAKE16-NEXT: v_bfe_u32 v26, v7, 16, 1 +; GFX11FAKE16-NEXT: 
v_lshlrev_b32_e32 v27, 16, v5 +; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v23, v24, v25, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_add3_u32 v24, v26, v7, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v7 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v7, v24, v25 :: v_dual_and_b32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v15, v6, v14 :: v_dual_lshlrev_b32 v14, 16, v13 +; GFX11FAKE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v15 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v21 +; GFX11FAKE16-NEXT: v_bfe_u32 v26, v15, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v6, v27, v14 +; GFX11FAKE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v21 +; GFX11FAKE16-NEXT: v_add3_u32 v21, v26, v15, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v4 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v6, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v14, v5, v13 :: v_dual_lshlrev_b32 v5, 16, v20 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v13, 16, v12 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v15, v21, v24, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_add3_u32 v21, v25, v6, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v6 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v14, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v5, v26, v13 :: v_dual_and_b32 v12, 0xffff0000, v12 +; GFX11FAKE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v20 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11FAKE16-NEXT: 
v_lshlrev_b32_e32 v26, 16, v2 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v20, v25, v14, 0x7fff +; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v6, v21, v24 :: v_dual_lshlrev_b32 v25, 16, v3 +; GFX11FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v14 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v5, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v13, v4, v12 :: v_dual_lshlrev_b32 v4, 16, v19 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v12, 16, v11 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v25, v12 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v14, v20, v21, vcc_lo +; GFX11FAKE16-NEXT: v_add3_u32 v20, v24, v5, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v5 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v13, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v19 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v10 +; GFX11FAKE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v12, v3, v11 :: v_dual_cndmask_b32 v5, v20, v21 +; GFX11FAKE16-NEXT: v_add3_u32 v21, v24, v13, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v13 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v19, 16, v18 +; GFX11FAKE16-NEXT: v_bfe_u32 v20, v4, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v24, v12, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v19, v26, v25 +; GFX11FAKE16-NEXT: v_add3_u32 v11, v20, v4, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v4 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v18, v2, v10 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v17 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v19, 16, 1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v10, 
16, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v11, v20, vcc_lo +; GFX11FAKE16-NEXT: v_add3_u32 v11, v24, v12, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v12 +; GFX11FAKE16-NEXT: v_add3_u32 v24, v25, v19, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v1 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12 +; GFX11FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v25, v10 :: v_dual_and_b32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v20, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v20, v2, 16, 1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v10, v24, v26, vcc_lo +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v24, 16, v16 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v0 +; GFX11FAKE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v17, v1, v9 :: v_dual_and_b32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_add3_u32 v1, v20, v2, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v24, v26, v25 +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v16, v0, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v17, 16, 1 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11FAKE16-NEXT: v_bfe_u32 v8, v24, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v24 +; GFX11FAKE16-NEXT: v_bfe_u32 v2, v16, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v17, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v17 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17 +; GFX11FAKE16-NEXT: v_add3_u32 v8, 
v8, v24, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v27, v18, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v16, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v16 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v9, v0, v9, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24 +; GFX11FAKE16-NEXT: v_add3_u32 v12, v27, v18, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v18 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v1, v9, v1, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v8, v20, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v25, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18 +; GFX11FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v8, v12, v19, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v2, v8, v10, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v12, v21, v3, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v3, v11, v4, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v4, v12, v5, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v5, v14, v6, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v6, v15, v7, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v7, v23, v22, 0x7060302 +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: v_fma_v16bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v8, v16 +; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v9, v17 +; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v10, v18 +; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v11, v19 +; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v12, v20 +; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v13, v21 +; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v14, v22 +; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v15, v23 +; GFX1250-NEXT: 
s_set_pc_i64 s[30:31] + %op = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) + ret <16 x bfloat> %op +} + +define <32 x bfloat> @v_fma_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) { +; GCN-LABEL: v_fma_v32bf16: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:256 +; GCN-NEXT: s_waitcnt vmcnt(2) +; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: v_fma_f32 v31, v31, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:252 +; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v30, v30, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248 +; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v29, v29, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116 +; 
GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:244 +; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v28, v28, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 +; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v27, v27, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:236 +; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v26, v26, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:232 +; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v25, v25, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 
offset:100 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:228 +; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v24, v24, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:224 +; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v23, v23, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220 +; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v22, v22, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:216 +; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v21, v21, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], 
s32 offset:84 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:212 +; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v20, v20, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:208 +; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v19, v19, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 +; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v18, v18, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:200 +; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v17, v17, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], 
s32 offset:68 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:196 +; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v16, v16, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:192 +; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v15, v15, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:188 +; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v14, v14, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:184 +; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v13, v13, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], 
s32 offset:52 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:180 +; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v12, v12, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176 +; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v11, v11, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172 +; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v10, v10, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:168 +; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v9, v9, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 
offset:36 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:164 +; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v8, v8, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160 +; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v7, v7, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:156 +; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v6, v6, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:152 +; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v5, v5, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20 +; GCN-NEXT: 
buffer_load_dword v33, off, s[0:3], s32 offset:148 +; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v4, v4, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:144 +; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v3, v3, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140 +; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v2, v2, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8 +; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136 +; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v1, v1, v32, v33 +; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 +; GCN-NEXT: buffer_load_dword v33, 
off, s[0:3], s32 offset:132 +; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: s_waitcnt vmcnt(1) +; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GCN-NEXT: v_fma_f32 v0, v0, v32, v33 +; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX7-LABEL: v_fma_v32bf16: +; GFX7: ; %bb.0: +; 
GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:256 +; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16 +; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: v_mul_f32_e32 v11, 
1.0, v11 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0 +; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: s_waitcnt vmcnt(2) +; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: v_fma_f32 v31, v31, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:252 +; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v30, v30, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120 +; GFX7-NEXT: 
buffer_load_dword v33, off, s[0:3], s32 offset:248 +; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v29, v29, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:244 +; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v28, v28, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240 +; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v27, v27, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:236 +; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v26, v26, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:232 +; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX7-NEXT: 
s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v25, v25, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:228 +; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v24, v24, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:224 +; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v23, v23, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220 +; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v22, v22, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:216 +; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 
v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v21, v21, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:212 +; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v20, v20, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:208 +; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v19, v19, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 +; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v18, v18, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:200 +; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; 
GFX7-NEXT: v_fma_f32 v17, v17, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:196 +; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v16, v16, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:192 +; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v15, v15, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:188 +; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v14, v14, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:184 +; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v13, v13, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52 +; GFX7-NEXT: 
buffer_load_dword v33, off, s[0:3], s32 offset:180 +; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v12, v12, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176 +; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v11, v11, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172 +; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v10, v10, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:168 +; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v9, v9, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:164 +; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX7-NEXT: s_waitcnt 
vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v8, v8, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160 +; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v7, v7, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:156 +; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v6, v6, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:152 +; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v5, v5, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:148 +; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: 
v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v4, v4, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:144 +; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v3, v3, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140 +; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v2, v2, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136 +; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v1, v1, v32, v33 +; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4 +; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:132 +; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX7-NEXT: s_waitcnt vmcnt(1) +; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32 +; GFX7-NEXT: s_waitcnt vmcnt(0) +; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33 +; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX7-NEXT: v_fma_f32 v0, v0, v32, v33 +; GFX7-NEXT: 
v_and_b32_e32 v0, 0xffff0000, v0 +; GFX7-NEXT: s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_fma_v32bf16: +; GFX8: ; %bb.0: +; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v15 +; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff +; GFX8-NEXT: s_waitcnt vmcnt(1) +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v32 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v15, v15, v33, v32 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60 +; GFX8-NEXT: v_fma_f32 v31, v31, v35, v34 +; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v30 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v14 +; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v14, v14, v30, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56 +; GFX8-NEXT: v_fma_f32 v32, v34, v32, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v30, 16, v29 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v13 +; GFX8-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v13, v13, v29, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52 +; GFX8-NEXT: v_fma_f32 v30, v34, v30, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v12 +; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: 
v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v12, v12, v28, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 +; GFX8-NEXT: v_fma_f32 v29, v34, v29, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v11 +; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v11, v11, v27, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44 +; GFX8-NEXT: v_fma_f32 v28, v34, v28, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v10 +; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v10, v10, v26, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40 +; GFX8-NEXT: v_fma_f32 v27, v34, v27, v35 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v25 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v9 +; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v9, v9, v25, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36 +; GFX8-NEXT: v_fma_f32 v26, v35, v34, v26 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v24 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v8 +; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v8, v8, v24, v33 +; GFX8-NEXT: 
buffer_load_dword v33, off, s[0:3], s32 offset:32 +; GFX8-NEXT: v_fma_f32 v25, v35, v34, v25 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v7 +; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v7, v7, v23, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28 +; GFX8-NEXT: v_fma_f32 v24, v35, v34, v24 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v6 +; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v6, v6, v22, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24 +; GFX8-NEXT: v_fma_f32 v23, v35, v34, v23 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v5 +; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v5, v5, v21, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20 +; GFX8-NEXT: v_fma_f32 v22, v35, v34, v22 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v4 +; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v4, v4, v20, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16 +; GFX8-NEXT: v_fma_f32 v21, v35, v34, v21 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v19 +; GFX8-NEXT: 
v_lshlrev_b32_e32 v35, 16, v3 +; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v3, v3, v19, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; GFX8-NEXT: v_fma_f32 v20, v35, v34, v20 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v2 +; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v2, v2, v18, v33 +; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8 +; GFX8-NEXT: v_fma_f32 v19, v35, v34, v19 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v1 +; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v33 +; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX8-NEXT: v_fma_f32 v1, v1, v17, v33 +; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:4 +; GFX8-NEXT: v_fma_f32 v18, v35, v34, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v16 +; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v0 +; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX8-NEXT: s_waitcnt vmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v17 +; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX8-NEXT: v_fma_f32 v0, v0, v16, v17 +; GFX8-NEXT: v_bfe_u32 v16, v31, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v31 +; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v31, v31 +; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v31 +; GFX8-NEXT: v_cndmask_b32_e32 v16, v16, v17, vcc +; GFX8-NEXT: v_bfe_u32 v17, v15, 16, 1 +; GFX8-NEXT: 
v_add_u32_e32 v17, vcc, v17, v15 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX8-NEXT: v_or_b32_e32 v15, 0x400000, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v15, v17, v15, vcc +; GFX8-NEXT: v_bfe_u32 v17, v32, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v32 +; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v32, v32 +; GFX8-NEXT: v_or_b32_e32 v31, 0x400000, v32 +; GFX8-NEXT: v_cndmask_b32_e32 v17, v17, v31, vcc +; GFX8-NEXT: v_bfe_u32 v31, v14, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v14 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX8-NEXT: v_or_b32_e32 v14, 0x400000, v14 +; GFX8-NEXT: v_cndmask_b32_e32 v14, v31, v14, vcc +; GFX8-NEXT: v_bfe_u32 v31, v30, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v30 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v30, v30 +; GFX8-NEXT: v_or_b32_e32 v30, 0x400000, v30 +; GFX8-NEXT: v_cndmask_b32_e32 v30, v31, v30, vcc +; GFX8-NEXT: v_bfe_u32 v31, v13, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v13 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX8-NEXT: v_or_b32_e32 v13, 0x400000, v13 +; GFX8-NEXT: v_cndmask_b32_e32 v13, v31, v13, vcc +; GFX8-NEXT: v_bfe_u32 v31, v29, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v29 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v29, v29 +; GFX8-NEXT: v_or_b32_e32 v29, 0x400000, v29 +; GFX8-NEXT: v_cndmask_b32_e32 v29, v31, v29, vcc +; GFX8-NEXT: v_bfe_u32 v31, v12, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v12 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX8-NEXT: v_or_b32_e32 v12, 0x400000, v12 +; GFX8-NEXT: v_cndmask_b32_e32 v12, v31, v12, vcc +; GFX8-NEXT: v_bfe_u32 v31, v28, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v28 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; 
GFX8-NEXT: v_cmp_u_f32_e32 vcc, v28, v28 +; GFX8-NEXT: v_or_b32_e32 v28, 0x400000, v28 +; GFX8-NEXT: v_cndmask_b32_e32 v28, v31, v28, vcc +; GFX8-NEXT: v_bfe_u32 v31, v11, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v11 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v11 +; GFX8-NEXT: v_cndmask_b32_e32 v11, v31, v11, vcc +; GFX8-NEXT: v_bfe_u32 v31, v27, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v27 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v27, v27 +; GFX8-NEXT: v_or_b32_e32 v27, 0x400000, v27 +; GFX8-NEXT: v_cndmask_b32_e32 v27, v31, v27, vcc +; GFX8-NEXT: v_bfe_u32 v31, v10, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v10 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10 +; GFX8-NEXT: v_cndmask_b32_e32 v10, v31, v10, vcc +; GFX8-NEXT: v_bfe_u32 v31, v26, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v26 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v26, v26 +; GFX8-NEXT: v_or_b32_e32 v26, 0x400000, v26 +; GFX8-NEXT: v_cndmask_b32_e32 v26, v31, v26, vcc +; GFX8-NEXT: v_bfe_u32 v31, v9, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v9 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9 +; GFX8-NEXT: v_cndmask_b32_e32 v9, v31, v9, vcc +; GFX8-NEXT: v_bfe_u32 v31, v25, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v25 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v25, v25 +; GFX8-NEXT: v_or_b32_e32 v25, 0x400000, v25 +; GFX8-NEXT: v_cndmask_b32_e32 v25, v31, v25, vcc +; GFX8-NEXT: v_bfe_u32 v31, v8, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v8 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8 +; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v8 +; 
GFX8-NEXT: v_cndmask_b32_e32 v8, v31, v8, vcc +; GFX8-NEXT: v_bfe_u32 v31, v24, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v24 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX8-NEXT: v_or_b32_e32 v24, 0x400000, v24 +; GFX8-NEXT: v_cndmask_b32_e32 v24, v31, v24, vcc +; GFX8-NEXT: v_bfe_u32 v31, v7, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v7 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7 +; GFX8-NEXT: v_cndmask_b32_e32 v7, v31, v7, vcc +; GFX8-NEXT: v_bfe_u32 v31, v23, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v23 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v23, v23 +; GFX8-NEXT: v_or_b32_e32 v23, 0x400000, v23 +; GFX8-NEXT: v_cndmask_b32_e32 v23, v31, v23, vcc +; GFX8-NEXT: v_bfe_u32 v31, v6, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v6 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v31, v6, vcc +; GFX8-NEXT: v_bfe_u32 v31, v22, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v22 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v22, v22 +; GFX8-NEXT: v_or_b32_e32 v22, 0x400000, v22 +; GFX8-NEXT: v_cndmask_b32_e32 v22, v31, v22, vcc +; GFX8-NEXT: v_bfe_u32 v31, v5, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v5 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v31, v5, vcc +; GFX8-NEXT: v_bfe_u32 v31, v21, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v21 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v21, v21 +; GFX8-NEXT: v_or_b32_e32 v21, 0x400000, v21 +; GFX8-NEXT: v_cndmask_b32_e32 v21, v31, v21, vcc +; GFX8-NEXT: v_bfe_u32 v31, v4, 16, 1 +; GFX8-NEXT: 
v_add_u32_e32 v31, vcc, v31, v4 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4 +; GFX8-NEXT: v_cndmask_b32_e32 v4, v31, v4, vcc +; GFX8-NEXT: v_bfe_u32 v31, v20, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v20 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v20, v20 +; GFX8-NEXT: v_or_b32_e32 v20, 0x400000, v20 +; GFX8-NEXT: v_cndmask_b32_e32 v20, v31, v20, vcc +; GFX8-NEXT: v_bfe_u32 v31, v3, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v3 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3 +; GFX8-NEXT: v_cndmask_b32_e32 v3, v31, v3, vcc +; GFX8-NEXT: v_bfe_u32 v31, v19, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v19 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v19, v19 +; GFX8-NEXT: v_or_b32_e32 v19, 0x400000, v19 +; GFX8-NEXT: v_cndmask_b32_e32 v19, v31, v19, vcc +; GFX8-NEXT: v_bfe_u32 v31, v2, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v2 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2 +; GFX8-NEXT: v_cndmask_b32_e32 v2, v31, v2, vcc +; GFX8-NEXT: v_bfe_u32 v31, v18, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v18 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v18, v18 +; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18 +; GFX8-NEXT: v_cndmask_b32_e32 v18, v31, v18, vcc +; GFX8-NEXT: v_bfe_u32 v31, v1, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31 +; GFX8-NEXT: v_fma_f32 v33, v35, v34, v33 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v1 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v31, v1, vcc +; GFX8-NEXT: v_bfe_u32 v31, v33, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v33 +; GFX8-NEXT: v_add_u32_e32 v31, 
vcc, s4, v31 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v33, v33 +; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v33 +; GFX8-NEXT: v_cndmask_b32_e32 v31, v31, v32, vcc +; GFX8-NEXT: v_bfe_u32 v32, v0, 16, 1 +; GFX8-NEXT: v_add_u32_e32 v32, vcc, v32, v0 +; GFX8-NEXT: v_add_u32_e32 v32, vcc, s4, v32 +; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX8-NEXT: v_or_b32_e32 v0, 0x400000, v0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v32, v0, vcc +; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7 +; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8 +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9 +; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10 +; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11 +; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v15 +; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v14 +; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13 +; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12 +; GFX8-NEXT: v_alignbit_b32 v0, v0, v31, 16 +; GFX8-NEXT: v_alignbit_b32 v1, v1, v18, 16 +; GFX8-NEXT: v_alignbit_b32 v2, v2, v19, 16 +; GFX8-NEXT: v_alignbit_b32 v3, v3, v20, 16 +; GFX8-NEXT: v_alignbit_b32 v4, v4, v21, 16 +; GFX8-NEXT: v_alignbit_b32 v5, v5, v22, 16 +; GFX8-NEXT: v_alignbit_b32 v6, v6, v23, 16 +; GFX8-NEXT: v_alignbit_b32 v7, v7, v24, 16 +; GFX8-NEXT: v_alignbit_b32 v8, v8, v25, 16 +; GFX8-NEXT: v_alignbit_b32 v9, v9, v26, 16 +; GFX8-NEXT: v_alignbit_b32 v10, v10, v27, 16 +; GFX8-NEXT: v_alignbit_b32 v11, v11, v28, 16 +; GFX8-NEXT: v_alignbit_b32 v12, v12, v29, 16 +; GFX8-NEXT: v_alignbit_b32 v13, v13, v30, 16 +; GFX8-NEXT: v_alignbit_b32 v14, v14, v17, 16 +; GFX8-NEXT: v_alignbit_b32 v15, v15, v16, 16 +; GFX8-NEXT: s_setpc_b64 s[30:31] +; +; GFX900-LABEL: v_fma_v32bf16: +; GFX900: ; %bb.0: +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) 
+; GFX900-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; GFX900-NEXT: v_lshlrev_b32_e32 v31, 16, v15 +; GFX900-NEXT: v_and_b32_e32 v15, 0xffff0000, v15 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: s_waitcnt vmcnt(1) +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v32 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v15, v15, v33, v32 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60 +; GFX900-NEXT: v_fma_f32 v31, v31, v35, v34 +; GFX900-NEXT: v_lshlrev_b32_e32 v32, 16, v30 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v14 +; GFX900-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX900-NEXT: v_and_b32_e32 v14, 0xffff0000, v14 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v31, v31 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v14, v14, v30, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56 +; GFX900-NEXT: v_fma_f32 v32, v34, v32, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v30, 16, v29 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v13 +; GFX900-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX900-NEXT: v_and_b32_e32 v13, 0xffff0000, v13 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v13, v13, v29, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52 +; GFX900-NEXT: v_fma_f32 v30, v34, v30, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v29, 16, v28 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v12 +; GFX900-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX900-NEXT: v_and_b32_e32 v12, 0xffff0000, v12 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: 
v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v12, v12, v28, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48 +; GFX900-NEXT: v_fma_f32 v29, v34, v29, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v28, 16, v27 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v11 +; GFX900-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v11, v11, v27, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44 +; GFX900-NEXT: v_fma_f32 v28, v34, v28, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v27, 16, v26 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v10 +; GFX900-NEXT: v_and_b32_e32 v26, 0xffff0000, v26 +; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v10, v10, v26, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40 +; GFX900-NEXT: v_fma_f32 v27, v34, v27, v35 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v25 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v9 +; GFX900-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v26, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v9, v9, v25, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36 +; GFX900-NEXT: v_fma_f32 v26, v35, v34, v26 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v24 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v8 +; GFX900-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v8, 
v8, v24, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32 +; GFX900-NEXT: v_fma_f32 v25, v35, v34, v25 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v7 +; GFX900-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v24, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v7, v7, v23, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28 +; GFX900-NEXT: v_fma_f32 v24, v35, v34, v24 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v6 +; GFX900-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v6, v6, v22, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24 +; GFX900-NEXT: v_fma_f32 v23, v35, v34, v23 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v5 +; GFX900-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v5, v5, v21, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20 +; GFX900-NEXT: v_fma_f32 v22, v35, v34, v22 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v4 +; GFX900-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v4, v4, v20, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16 
+; GFX900-NEXT: v_fma_f32 v21, v35, v34, v21 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v3 +; GFX900-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v3, v3, v19, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12 +; GFX900-NEXT: v_fma_f32 v20, v35, v34, v20 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v2 +; GFX900-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v2, v2, v18, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8 +; GFX900-NEXT: v_fma_f32 v19, v35, v34, v19 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v17 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v1 +; GFX900-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v1, v1, v17, v33 +; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4 +; GFX900-NEXT: v_fma_f32 v18, v35, v34, v18 +; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v16 +; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v0 +; GFX900-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v17, 16, v33 +; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX900-NEXT: v_fma_f32 v0, v0, v16, v33 +; GFX900-NEXT: v_bfe_u32 v16, v31, 16, 1 +; GFX900-NEXT: v_add3_u32 v16, v16, v31, s4 +; GFX900-NEXT: v_or_b32_e32 v31, 0x400000, v31 +; 
GFX900-NEXT: v_cndmask_b32_e32 v16, v16, v31, vcc +; GFX900-NEXT: v_bfe_u32 v31, v15, 16, 1 +; GFX900-NEXT: v_add3_u32 v31, v31, v15, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v15, v15 +; GFX900-NEXT: v_or_b32_e32 v15, 0x400000, v15 +; GFX900-NEXT: v_cndmask_b32_e32 v15, v31, v15, vcc +; GFX900-NEXT: v_bfe_u32 v31, v32, 16, 1 +; GFX900-NEXT: v_add3_u32 v31, v31, v32, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v32, v32 +; GFX900-NEXT: v_or_b32_e32 v32, 0x400000, v32 +; GFX900-NEXT: v_cndmask_b32_e32 v31, v31, v32, vcc +; GFX900-NEXT: v_bfe_u32 v32, v14, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v14, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v14, v14 +; GFX900-NEXT: v_or_b32_e32 v14, 0x400000, v14 +; GFX900-NEXT: v_cndmask_b32_e32 v14, v32, v14, vcc +; GFX900-NEXT: v_bfe_u32 v32, v30, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v30, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v30, v30 +; GFX900-NEXT: v_or_b32_e32 v30, 0x400000, v30 +; GFX900-NEXT: v_cndmask_b32_e32 v30, v32, v30, vcc +; GFX900-NEXT: v_bfe_u32 v32, v13, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v13, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v13, v13 +; GFX900-NEXT: v_or_b32_e32 v13, 0x400000, v13 +; GFX900-NEXT: v_cndmask_b32_e32 v13, v32, v13, vcc +; GFX900-NEXT: v_bfe_u32 v32, v29, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v29, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v29, v29 +; GFX900-NEXT: v_or_b32_e32 v29, 0x400000, v29 +; GFX900-NEXT: v_cndmask_b32_e32 v29, v32, v29, vcc +; GFX900-NEXT: v_bfe_u32 v32, v12, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v12, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12 +; GFX900-NEXT: v_or_b32_e32 v12, 0x400000, v12 +; GFX900-NEXT: v_cndmask_b32_e32 v12, v32, v12, vcc +; GFX900-NEXT: v_bfe_u32 v32, v28, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v28, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v28, v28 +; GFX900-NEXT: v_or_b32_e32 v28, 0x400000, v28 +; GFX900-NEXT: v_cndmask_b32_e32 v28, v32, v28, vcc +; GFX900-NEXT: v_bfe_u32 v32, v11, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, 
v32, v11, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v11, v11 +; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v11 +; GFX900-NEXT: v_cndmask_b32_e32 v11, v32, v11, vcc +; GFX900-NEXT: v_bfe_u32 v32, v27, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v27, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v27, v27 +; GFX900-NEXT: v_or_b32_e32 v27, 0x400000, v27 +; GFX900-NEXT: v_cndmask_b32_e32 v27, v32, v27, vcc +; GFX900-NEXT: v_bfe_u32 v32, v10, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v10, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v10, v10 +; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v10 +; GFX900-NEXT: v_cndmask_b32_e32 v10, v32, v10, vcc +; GFX900-NEXT: v_bfe_u32 v32, v26, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v26, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v26, v26 +; GFX900-NEXT: v_or_b32_e32 v26, 0x400000, v26 +; GFX900-NEXT: v_cndmask_b32_e32 v26, v32, v26, vcc +; GFX900-NEXT: v_bfe_u32 v32, v9, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v9, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v9, v9 +; GFX900-NEXT: v_or_b32_e32 v9, 0x400000, v9 +; GFX900-NEXT: v_cndmask_b32_e32 v9, v32, v9, vcc +; GFX900-NEXT: v_bfe_u32 v32, v25, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v25, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v25, v25 +; GFX900-NEXT: v_or_b32_e32 v25, 0x400000, v25 +; GFX900-NEXT: v_cndmask_b32_e32 v25, v32, v25, vcc +; GFX900-NEXT: v_bfe_u32 v32, v8, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v8, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v8, v8 +; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v8 +; GFX900-NEXT: v_cndmask_b32_e32 v8, v32, v8, vcc +; GFX900-NEXT: v_bfe_u32 v32, v24, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v24, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v24, v24 +; GFX900-NEXT: v_or_b32_e32 v24, 0x400000, v24 +; GFX900-NEXT: v_cndmask_b32_e32 v24, v32, v24, vcc +; GFX900-NEXT: v_bfe_u32 v32, v7, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v7, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7 +; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v7 +; GFX900-NEXT: v_cndmask_b32_e32 v7, v32, 
v7, vcc +; GFX900-NEXT: v_bfe_u32 v32, v23, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v23, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v23, v23 +; GFX900-NEXT: v_or_b32_e32 v23, 0x400000, v23 +; GFX900-NEXT: v_cndmask_b32_e32 v23, v32, v23, vcc +; GFX900-NEXT: v_bfe_u32 v32, v6, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v6, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 +; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v6 +; GFX900-NEXT: v_cndmask_b32_e32 v6, v32, v6, vcc +; GFX900-NEXT: v_bfe_u32 v32, v22, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v22, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v22, v22 +; GFX900-NEXT: v_or_b32_e32 v22, 0x400000, v22 +; GFX900-NEXT: v_cndmask_b32_e32 v22, v32, v22, vcc +; GFX900-NEXT: v_bfe_u32 v32, v5, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v5, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5 +; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v5 +; GFX900-NEXT: v_cndmask_b32_e32 v5, v32, v5, vcc +; GFX900-NEXT: v_bfe_u32 v32, v21, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v21, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v21, v21 +; GFX900-NEXT: v_or_b32_e32 v21, 0x400000, v21 +; GFX900-NEXT: v_cndmask_b32_e32 v21, v32, v21, vcc +; GFX900-NEXT: v_bfe_u32 v32, v4, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v4, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v4, v4 +; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v4 +; GFX900-NEXT: v_cndmask_b32_e32 v4, v32, v4, vcc +; GFX900-NEXT: v_bfe_u32 v32, v20, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v20, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v20, v20 +; GFX900-NEXT: v_or_b32_e32 v20, 0x400000, v20 +; GFX900-NEXT: v_cndmask_b32_e32 v20, v32, v20, vcc +; GFX900-NEXT: v_bfe_u32 v32, v3, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v3, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 +; GFX900-NEXT: v_or_b32_e32 v3, 0x400000, v3 +; GFX900-NEXT: v_cndmask_b32_e32 v3, v32, v3, vcc +; GFX900-NEXT: v_bfe_u32 v32, v19, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v19, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v19, v19 +; GFX900-NEXT: 
v_or_b32_e32 v19, 0x400000, v19 +; GFX900-NEXT: v_cndmask_b32_e32 v19, v32, v19, vcc +; GFX900-NEXT: v_bfe_u32 v32, v2, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v2, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2 +; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v2 +; GFX900-NEXT: v_cndmask_b32_e32 v2, v32, v2, vcc +; GFX900-NEXT: v_bfe_u32 v32, v18, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v18, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v18, v18 +; GFX900-NEXT: v_or_b32_e32 v18, 0x400000, v18 +; GFX900-NEXT: v_cndmask_b32_e32 v18, v32, v18, vcc +; GFX900-NEXT: v_bfe_u32 v32, v1, 16, 1 +; GFX900-NEXT: v_fma_f32 v17, v35, v34, v17 +; GFX900-NEXT: v_add3_u32 v32, v32, v1, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 +; GFX900-NEXT: v_or_b32_e32 v1, 0x400000, v1 +; GFX900-NEXT: v_cndmask_b32_e32 v1, v32, v1, vcc +; GFX900-NEXT: v_bfe_u32 v32, v17, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v17, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v17, v17 +; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v17 +; GFX900-NEXT: v_cndmask_b32_e32 v17, v32, v17, vcc +; GFX900-NEXT: v_bfe_u32 v32, v0, 16, 1 +; GFX900-NEXT: v_add3_u32 v32, v32, v0, s4 +; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 +; GFX900-NEXT: v_or_b32_e32 v0, 0x400000, v0 +; GFX900-NEXT: v_cndmask_b32_e32 v0, v32, v0, vcc +; GFX900-NEXT: s_mov_b32 s4, 0x7060302 +; GFX900-NEXT: v_perm_b32 v0, v0, v17, s4 +; GFX900-NEXT: v_perm_b32 v1, v1, v18, s4 +; GFX900-NEXT: v_perm_b32 v2, v2, v19, s4 +; GFX900-NEXT: v_perm_b32 v3, v3, v20, s4 +; GFX900-NEXT: v_perm_b32 v4, v4, v21, s4 +; GFX900-NEXT: v_perm_b32 v5, v5, v22, s4 +; GFX900-NEXT: v_perm_b32 v6, v6, v23, s4 +; GFX900-NEXT: v_perm_b32 v7, v7, v24, s4 +; GFX900-NEXT: v_perm_b32 v8, v8, v25, s4 +; GFX900-NEXT: v_perm_b32 v9, v9, v26, s4 +; GFX900-NEXT: v_perm_b32 v10, v10, v27, s4 +; GFX900-NEXT: v_perm_b32 v11, v11, v28, s4 +; GFX900-NEXT: v_perm_b32 v12, v12, v29, s4 +; GFX900-NEXT: v_perm_b32 v13, v13, v30, s4 +; GFX900-NEXT: v_perm_b32 v14, v14, v31, s4 +; GFX900-NEXT: v_perm_b32 
v15, v15, v16, s4 +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX950-LABEL: v_fma_v32bf16: +; GFX950: ; %bb.0: +; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: scratch_load_dword v35, off, s32 offset:64 +; GFX950-NEXT: scratch_load_dword v36, off, s32 +; GFX950-NEXT: scratch_load_dword v38, off, s32 offset:60 +; GFX950-NEXT: scratch_load_dword v39, off, s32 offset:56 +; GFX950-NEXT: scratch_load_dword v48, off, s32 offset:52 +; GFX950-NEXT: scratch_load_dword v49, off, s32 offset:48 +; GFX950-NEXT: scratch_load_dword v50, off, s32 offset:44 +; GFX950-NEXT: scratch_load_dword v51, off, s32 offset:40 +; GFX950-NEXT: scratch_load_dword v52, off, s32 offset:36 +; GFX950-NEXT: scratch_load_dword v53, off, s32 offset:32 +; GFX950-NEXT: scratch_load_dword v54, off, s32 offset:28 +; GFX950-NEXT: scratch_load_dword v31, off, s32 offset:4 +; GFX950-NEXT: scratch_load_dword v32, off, s32 offset:8 +; GFX950-NEXT: scratch_load_dword v33, off, s32 offset:12 +; GFX950-NEXT: scratch_load_dword v34, off, s32 offset:16 +; GFX950-NEXT: scratch_load_dword v37, off, s32 offset:20 +; GFX950-NEXT: scratch_load_dword v55, off, s32 offset:24 +; GFX950-NEXT: v_accvgpr_write_b32 a3, v43 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a5, v45 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a6, v46 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a8, v56 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a11, v59 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a13, v61 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a14, v62 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a15, v63 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v43, 0xffff0000, v14 +; GFX950-NEXT: v_lshlrev_b32_e32 v45, 16, v14 +; GFX950-NEXT: v_and_b32_e32 v46, 0xffff0000, v29 +; GFX950-NEXT: v_lshlrev_b32_e32 v56, 16, v29 +; GFX950-NEXT: v_and_b32_e32 v59, 0xffff0000, v12 +; GFX950-NEXT: v_lshlrev_b32_e32 v61, 16, v12 +; GFX950-NEXT: v_and_b32_e32 v62, 0xffff0000, v27 +; GFX950-NEXT: 
v_lshlrev_b32_e32 v27, 16, v27 +; GFX950-NEXT: v_accvgpr_write_b32 a2, v42 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a4, v44 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a7, v47 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a9, v57 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v42, 0xffff0000, v30 +; GFX950-NEXT: v_lshlrev_b32_e32 v44, 16, v30 +; GFX950-NEXT: v_and_b32_e32 v47, 0xffff0000, v13 +; GFX950-NEXT: v_lshlrev_b32_e32 v57, 16, v13 +; GFX950-NEXT: v_accvgpr_write_b32 a0, v40 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a1, v41 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v40, 0xffff0000, v15 +; GFX950-NEXT: v_lshlrev_b32_e32 v41, 16, v15 +; GFX950-NEXT: v_accvgpr_write_b32 a10, v58 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_write_b32 a12, v60 ; Reload Reuse +; GFX950-NEXT: v_and_b32_e32 v58, 0xffff0000, v28 +; GFX950-NEXT: v_lshlrev_b32_e32 v60, 16, v28 +; GFX950-NEXT: s_waitcnt vmcnt(16) +; GFX950-NEXT: v_and_b32_e32 v15, 0xffff0000, v35 +; GFX950-NEXT: s_waitcnt vmcnt(15) +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v36 +; GFX950-NEXT: v_lshlrev_b32_e32 v63, 16, v36 +; GFX950-NEXT: s_waitcnt vmcnt(14) +; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v38 +; GFX950-NEXT: v_lshlrev_b32_e32 v29, 16, v38 +; GFX950-NEXT: s_waitcnt vmcnt(11) +; GFX950-NEXT: v_and_b32_e32 v36, 0xffff0000, v49 +; GFX950-NEXT: v_and_b32_e32 v38, 0xffff0000, v11 +; GFX950-NEXT: v_fmac_f32_e32 v36, v38, v62 +; GFX950-NEXT: v_lshlrev_b32_e32 v38, 16, v49 +; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v39 +; GFX950-NEXT: v_lshlrev_b32_e32 v30, 16, v39 +; GFX950-NEXT: v_fmac_f32_e32 v38, v11, v27 +; GFX950-NEXT: s_waitcnt vmcnt(10) +; GFX950-NEXT: v_and_b32_e32 v11, 0xffff0000, v50 +; GFX950-NEXT: v_and_b32_e32 v27, 0xffff0000, v26 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v10 +; GFX950-NEXT: v_fmac_f32_e32 v11, v39, v27 +; GFX950-NEXT: v_lshlrev_b32_e32 v27, 16, v50 +; GFX950-NEXT: v_lshlrev_b32_e32 v26, 16, v26 
+; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX950-NEXT: v_fmac_f32_e32 v27, v10, v26 +; GFX950-NEXT: s_waitcnt vmcnt(9) +; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v51 +; GFX950-NEXT: v_and_b32_e32 v26, 0xffff0000, v25 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v9 +; GFX950-NEXT: v_fmac_f32_e32 v10, v39, v26 +; GFX950-NEXT: v_lshlrev_b32_e32 v26, 16, v51 +; GFX950-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX950-NEXT: v_fmac_f32_e32 v26, v9, v25 +; GFX950-NEXT: s_waitcnt vmcnt(8) +; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v52 +; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v24 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v8 +; GFX950-NEXT: v_fmac_f32_e32 v9, v39, v25 +; GFX950-NEXT: v_lshlrev_b32_e32 v25, 16, v52 +; GFX950-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX950-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX950-NEXT: v_fmac_f32_e32 v25, v8, v24 +; GFX950-NEXT: s_waitcnt vmcnt(7) +; GFX950-NEXT: v_and_b32_e32 v8, 0xffff0000, v53 +; GFX950-NEXT: v_and_b32_e32 v24, 0xffff0000, v23 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v7 +; GFX950-NEXT: v_fmac_f32_e32 v8, v39, v24 +; GFX950-NEXT: v_lshlrev_b32_e32 v24, 16, v53 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX950-NEXT: v_fmac_f32_e32 v24, v7, v23 +; GFX950-NEXT: s_waitcnt vmcnt(6) +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v54 +; GFX950-NEXT: v_and_b32_e32 v23, 0xffff0000, v22 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v6 +; GFX950-NEXT: v_fmac_f32_e32 v7, v39, v23 +; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v54 +; GFX950-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX950-NEXT: v_fmac_f32_e32 v23, v6, v22 +; GFX950-NEXT: s_waitcnt vmcnt(0) +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v55 +; GFX950-NEXT: v_and_b32_e32 v22, 0xffff0000, v21 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v5 +; GFX950-NEXT: v_fmac_f32_e32 v6, v39, v22 +; GFX950-NEXT: 
v_lshlrev_b32_e32 v22, 16, v55 +; GFX950-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX950-NEXT: v_fmac_f32_e32 v22, v5, v21 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v37 +; GFX950-NEXT: v_and_b32_e32 v21, 0xffff0000, v20 +; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v4 +; GFX950-NEXT: v_fmac_f32_e32 v5, v39, v21 +; GFX950-NEXT: v_lshlrev_b32_e32 v21, 16, v37 +; GFX950-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX950-NEXT: v_fmac_f32_e32 v21, v4, v20 +; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v34 +; GFX950-NEXT: v_and_b32_e32 v20, 0xffff0000, v19 +; GFX950-NEXT: v_and_b32_e32 v37, 0xffff0000, v3 +; GFX950-NEXT: v_fmac_f32_e32 v4, v37, v20 +; GFX950-NEXT: v_lshlrev_b32_e32 v20, 16, v34 +; GFX950-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX950-NEXT: v_fmac_f32_e32 v20, v3, v19 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v33 +; GFX950-NEXT: v_and_b32_e32 v19, 0xffff0000, v18 +; GFX950-NEXT: v_and_b32_e32 v34, 0xffff0000, v2 +; GFX950-NEXT: v_fmac_f32_e32 v3, v34, v19 +; GFX950-NEXT: v_lshlrev_b32_e32 v19, 16, v33 +; GFX950-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX950-NEXT: v_fmac_f32_e32 v19, v2, v18 +; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v32 +; GFX950-NEXT: v_and_b32_e32 v18, 0xffff0000, v17 +; GFX950-NEXT: v_and_b32_e32 v33, 0xffff0000, v1 +; GFX950-NEXT: v_fmac_f32_e32 v2, v33, v18 +; GFX950-NEXT: v_lshlrev_b32_e32 v18, 16, v32 +; GFX950-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX950-NEXT: v_fmac_f32_e32 v18, v1, v17 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v31 +; GFX950-NEXT: v_and_b32_e32 v17, 0xffff0000, v16 +; GFX950-NEXT: v_and_b32_e32 v32, 0xffff0000, v0 +; GFX950-NEXT: v_lshlrev_b32_e32 v28, 16, v35 +; GFX950-NEXT: v_fmac_f32_e32 v15, v40, v12 +; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v48 +; GFX950-NEXT: 
v_lshlrev_b32_e32 v35, 16, v48 +; GFX950-NEXT: v_fmac_f32_e32 v1, v32, v17 +; GFX950-NEXT: v_lshlrev_b32_e32 v17, 16, v31 +; GFX950-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX950-NEXT: v_fmac_f32_e32 v28, v41, v63 +; GFX950-NEXT: v_fmac_f32_e32 v14, v43, v42 +; GFX950-NEXT: v_fmac_f32_e32 v29, v45, v44 +; GFX950-NEXT: v_fmac_f32_e32 v13, v47, v46 +; GFX950-NEXT: v_fmac_f32_e32 v30, v57, v56 +; GFX950-NEXT: v_fmac_f32_e32 v12, v59, v58 +; GFX950-NEXT: v_fmac_f32_e32 v35, v61, v60 +; GFX950-NEXT: v_fmac_f32_e32 v17, v0, v16 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v17, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v18, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v19, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v20, v4 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v21, v5 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v5, v22, v6 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v23, v7 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v7, v24, v8 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v8, v25, v9 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v9, v26, v10 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v10, v27, v11 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v11, v38, v36 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v12, v35, v12 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v13, v30, v13 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v14, v29, v14 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v15, v28, v15 +; GFX950-NEXT: v_accvgpr_read_b32 v63, a15 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v62, a14 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v61, a13 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v60, a12 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v59, a11 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v58, a10 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v57, a9 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v56, a8 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v47, a7 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v46, a6 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v45, a5 ; Reload Reuse +; GFX950-NEXT: 
v_accvgpr_read_b32 v44, a4 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v43, a3 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v42, a2 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v41, a1 ; Reload Reuse +; GFX950-NEXT: v_accvgpr_read_b32 v40, a0 ; Reload Reuse +; GFX950-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_fma_v32bf16: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_clause 0x8 +; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64 +; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 +; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60 +; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:56 +; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:52 +; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48 +; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:44 +; GFX10-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:40 +; GFX10-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:36 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v15 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v15 +; GFX10-NEXT: v_and_b32_e32 v52, 0xffff0000, v10 +; GFX10-NEXT: s_waitcnt vmcnt(8) +; GFX10-NEXT: v_lshlrev_b32_e32 v31, 16, v32 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v33 +; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v32 +; GFX10-NEXT: v_and_b32_e32 v32, 0xffff0000, v33 +; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32 +; GFX10-NEXT: v_fmac_f32_e32 v31, v49, v50 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v30 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v14 +; GFX10-NEXT: v_fmac_f32_e32 v15, v51, v32 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v34 +; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v14 +; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v34 +; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28 +; GFX10-NEXT: v_fmac_f32_e32 
v32, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v29 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v13 +; GFX10-NEXT: v_fmac_f32_e32 v14, v51, v30 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v35 +; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v13 +; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v35 +; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:24 +; GFX10-NEXT: v_fmac_f32_e32 v30, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v28 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v12 +; GFX10-NEXT: v_fmac_f32_e32 v13, v51, v29 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v29, 16, v36 +; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v12 +; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v36 +; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:20 +; GFX10-NEXT: v_fmac_f32_e32 v29, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v27 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v11 +; GFX10-NEXT: v_fmac_f32_e32 v12, v51, v28 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v37 +; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v11 +; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v37 +; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:16 +; GFX10-NEXT: v_fmac_f32_e32 v28, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v26 +; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v10 +; GFX10-NEXT: v_fmac_f32_e32 v11, v51, v27 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v38 +; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v26 +; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v38 +; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v25 +; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v39 +; GFX10-NEXT: v_fmac_f32_e32 v27, v50, v49 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v9 +; GFX10-NEXT: v_fmac_f32_e32 
v10, v52, v51 +; GFX10-NEXT: s_clause 0x1 +; GFX10-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12 +; GFX10-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:8 +; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v31 +; GFX10-NEXT: v_fmac_f32_e32 v26, v49, v38 +; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:4 +; GFX10-NEXT: v_and_b32_e32 v49, 0xffff0000, v9 +; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v24 +; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v31, v31 +; GFX10-NEXT: v_fmac_f32_e32 v9, v49, v25 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v8 +; GFX10-NEXT: s_waitcnt vmcnt(8) +; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v48 +; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX10-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 +; GFX10-NEXT: v_fmac_f32_e32 v25, v49, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v7 +; GFX10-NEXT: v_fmac_f32_e32 v48, v8, v24 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v22 +; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX10-NEXT: s_waitcnt vmcnt(7) +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v33 +; GFX10-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX10-NEXT: v_fmac_f32_e32 v8, v49, v39 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v6 +; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX10-NEXT: v_fmac_f32_e32 v33, v7, v23 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v21 +; GFX10-NEXT: s_waitcnt vmcnt(6) +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v34 +; GFX10-NEXT: v_and_b32_e32 v34, 0xffff0000, v34 +; GFX10-NEXT: v_lshlrev_b32_e32 v23, 16, v5 +; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX10-NEXT: v_fmac_f32_e32 v7, v39, v24 +; GFX10-NEXT: v_fmac_f32_e32 
v34, v6, v22 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v4 +; GFX10-NEXT: s_waitcnt vmcnt(5) +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v35 +; GFX10-NEXT: v_and_b32_e32 v35, 0xffff0000, v35 +; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v19 +; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX10-NEXT: v_fmac_f32_e32 v6, v23, v49 +; GFX10-NEXT: v_fmac_f32_e32 v35, v5, v21 +; GFX10-NEXT: v_lshlrev_b32_e32 v23, 16, v3 +; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX10-NEXT: s_waitcnt vmcnt(4) +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v36 +; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX10-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 +; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v18 +; GFX10-NEXT: v_lshlrev_b32_e32 v21, 16, v2 +; GFX10-NEXT: v_fmac_f32_e32 v5, v39, v24 +; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX10-NEXT: v_fmac_f32_e32 v36, v4, v20 +; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v16 +; GFX10-NEXT: s_waitcnt vmcnt(3) +; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v37 +; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v17 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX10-NEXT: v_fmac_f32_e32 v39, v23, v22 +; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v37 +; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v0 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX10-NEXT: v_fmac_f32_e32 v23, v3, v19 +; GFX10-NEXT: s_waitcnt vmcnt(2) +; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v50 +; GFX10-NEXT: s_waitcnt vmcnt(1) +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v51 +; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v51 +; GFX10-NEXT: v_and_b32_e32 v50, 0xffff0000, v50 +; GFX10-NEXT: v_cmp_u_f32_e64 s5, v33, v33 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v38 +; 
GFX10-NEXT: v_and_b32_e32 v38, 0xffff0000, v38 +; GFX10-NEXT: v_fmac_f32_e32 v37, v21, v49 +; GFX10-NEXT: v_fmac_f32_e32 v50, v2, v18 +; GFX10-NEXT: v_fmac_f32_e32 v19, v1, v17 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v48 +; GFX10-NEXT: v_fmac_f32_e32 v38, v0, v16 +; GFX10-NEXT: v_bfe_u32 v0, v48, 16, 1 +; GFX10-NEXT: v_bfe_u32 v16, v33, 16, 1 +; GFX10-NEXT: v_bfe_u32 v2, v8, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v33 +; GFX10-NEXT: v_bfe_u32 v18, v7, 16, 1 +; GFX10-NEXT: v_bfe_u32 v21, v34, 16, 1 +; GFX10-NEXT: v_add3_u32 v0, v0, v48, 0x7fff +; GFX10-NEXT: v_bfe_u32 v48, v35, 16, 1 +; GFX10-NEXT: v_add3_u32 v16, v16, v33, 0x7fff +; GFX10-NEXT: v_bfe_u32 v33, v5, 16, 1 +; GFX10-NEXT: v_fmac_f32_e32 v3, v4, v24 +; GFX10-NEXT: v_fmac_f32_e32 v51, v22, v20 +; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v8 +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v7 +; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v34 +; GFX10-NEXT: v_bfe_u32 v24, v6, 16, 1 +; GFX10-NEXT: v_add3_u32 v2, v2, v8, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s4, v8, v8 +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v35 +; GFX10-NEXT: v_add3_u32 v18, v18, v7, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s6, v7, v7 +; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v5 +; GFX10-NEXT: v_add3_u32 v21, v21, v34, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s7, v34, v34 +; GFX10-NEXT: v_bfe_u32 v34, v39, 16, 1 +; GFX10-NEXT: v_add3_u32 v48, v48, v35, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s9, v35, v35 +; GFX10-NEXT: v_bfe_u32 v35, v23, 16, 1 +; GFX10-NEXT: v_add3_u32 v33, v33, v5, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s10, v5, v5 +; GFX10-NEXT: v_bfe_u32 v5, v37, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v49, 0x400000, v6 +; GFX10-NEXT: v_add3_u32 v24, v24, v6, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s8, v6, v6 +; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v39 +; GFX10-NEXT: v_add3_u32 v34, v34, v39, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s11, v39, v39 +; GFX10-NEXT: v_or_b32_e32 v39, 0x400000, v23 +; GFX10-NEXT: v_add3_u32 v35, v35, v23, 0x7fff +; GFX10-NEXT: 
v_cmp_u_f32_e64 s12, v23, v23 +; GFX10-NEXT: v_or_b32_e32 v23, 0x400000, v37 +; GFX10-NEXT: v_add3_u32 v5, v5, v37, 0x7fff +; GFX10-NEXT: v_cmp_u_f32_e64 s13, v37, v37 +; GFX10-NEXT: v_bfe_u32 v37, v31, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e64 v53, v2, v4, s4 +; GFX10-NEXT: v_bfe_u32 v4, v3, 16, 1 +; GFX10-NEXT: v_cndmask_b32_e64 v16, v16, v17, s5 +; GFX10-NEXT: v_cndmask_b32_e64 v17, v18, v20, s6 +; GFX10-NEXT: v_add3_u32 v37, v37, v31, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v18, v21, v22, s7 +; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v3 +; GFX10-NEXT: v_bfe_u32 v22, v19, 16, 1 +; GFX10-NEXT: v_add3_u32 v4, v4, v3, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v31, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v15, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v15 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v15, v15 +; GFX10-NEXT: v_cndmask_b32_e64 v21, v24, v49, s8 +; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v19 +; GFX10-NEXT: v_add3_u32 v37, v37, v15, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v7, v33, v7, s10 +; GFX10-NEXT: v_bfe_u32 v33, v51, 16, 1 +; GFX10-NEXT: v_add3_u32 v22, v22, v19, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v6, v34, v6, s11 +; GFX10-NEXT: v_cndmask_b32_e64 v15, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v32, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v32 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v32, v32 +; GFX10-NEXT: v_or_b32_e32 v34, 0x400000, v51 +; GFX10-NEXT: v_cndmask_b32_e64 v35, v35, v39, s12 +; GFX10-NEXT: v_add3_u32 v37, v37, v32, 0x7fff +; GFX10-NEXT: v_bfe_u32 v39, v38, 16, 1 +; GFX10-NEXT: v_add3_u32 v33, v33, v51, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v23, s13 +; GFX10-NEXT: v_or_b32_e32 v23, 0x400000, v38 +; GFX10-NEXT: v_cndmask_b32_e64 v32, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v14, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v14 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v14, v14 +; GFX10-NEXT: v_add3_u32 v39, v39, v38, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v50 +; GFX10-NEXT: v_add3_u32 v37, v37, v14, 0x7fff +; 
GFX10-NEXT: v_cndmask_b32_e64 v8, v48, v8, s9 +; GFX10-NEXT: v_perm_b32 v15, v15, v31, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e64 v14, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v30, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v30 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v30, v30 +; GFX10-NEXT: v_perm_b32 v14, v14, v32, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v30, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v30, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v13, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v13 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v13, v13 +; GFX10-NEXT: v_add3_u32 v37, v37, v13, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v13, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v29, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v29 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v29, v29 +; GFX10-NEXT: v_perm_b32 v13, v13, v30, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v29, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v29, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v12, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v12 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v12, v12 +; GFX10-NEXT: v_add3_u32 v37, v37, v12, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v12, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v28, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v28 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v28, v28 +; GFX10-NEXT: v_perm_b32 v12, v12, v29, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v28, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v28, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v11, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v11 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v11, v11 +; GFX10-NEXT: v_add3_u32 v37, v37, v11, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v11, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v27, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v27 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v27, v27 +; GFX10-NEXT: v_perm_b32 v11, v11, v28, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v27, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v27, v37, v52, s14 +; GFX10-NEXT: 
v_bfe_u32 v37, v10, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v10 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v10, v10 +; GFX10-NEXT: v_add3_u32 v37, v37, v10, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v10, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v26, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v26 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v26, v26 +; GFX10-NEXT: v_perm_b32 v10, v10, v27, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v26, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v26, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v9, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v9 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v9, v9 +; GFX10-NEXT: v_add3_u32 v37, v37, v9, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v9, v37, v52, s14 +; GFX10-NEXT: v_bfe_u32 v37, v25, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v25 +; GFX10-NEXT: v_cmp_u_f32_e64 s14, v25, v25 +; GFX10-NEXT: v_perm_b32 v9, v9, v26, 0x7060302 +; GFX10-NEXT: v_add3_u32 v37, v37, v25, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e64 v25, v37, v52, s14 +; GFX10-NEXT: v_cndmask_b32_e32 v52, v0, v1, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_bfe_u32 v1, v50, 16, 1 +; GFX10-NEXT: v_bfe_u32 v37, v36, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v0, 0x400000, v36 +; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v20, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19 +; GFX10-NEXT: v_add3_u32 v1, v1, v50, 0x7fff +; GFX10-NEXT: v_add3_u32 v37, v37, v36, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v4, v22, v24, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX10-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX10-NEXT: v_cndmask_b32_e32 v20, v39, v23, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX10-NEXT: v_perm_b32 v1, v4, v3, 0x7060302 +; GFX10-NEXT: v_perm_b32 v3, v35, v6, 0x7060302 +; GFX10-NEXT: v_perm_b32 v6, v18, v17, 0x7060302 +; 
GFX10-NEXT: v_perm_b32 v2, v2, v5, 0x7060302 +; GFX10-NEXT: v_cndmask_b32_e32 v22, v37, v0, vcc_lo +; GFX10-NEXT: v_perm_b32 v0, v20, v19, 0x7060302 +; GFX10-NEXT: v_perm_b32 v5, v8, v21, 0x7060302 +; GFX10-NEXT: v_perm_b32 v8, v52, v25, 0x7060302 +; GFX10-NEXT: v_perm_b32 v4, v22, v7, 0x7060302 +; GFX10-NEXT: v_perm_b32 v7, v16, v53, 0x7060302 +; GFX10-NEXT: s_setpc_b64 s[30:31] +; +; GFX11TRUE16-LABEL: v_fma_v32bf16: +; GFX11TRUE16: ; %bb.0: +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: s_clause 0x10 +; GFX11TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX11TRUE16-NEXT: scratch_load_b32 v32, off, s32 +; GFX11TRUE16-NEXT: scratch_load_b32 v33, off, s32 offset:60 +; GFX11TRUE16-NEXT: scratch_load_b32 v34, off, s32 offset:56 +; GFX11TRUE16-NEXT: scratch_load_b32 v35, off, s32 offset:52 +; GFX11TRUE16-NEXT: scratch_load_b32 v36, off, s32 offset:48 +; GFX11TRUE16-NEXT: scratch_load_b32 v37, off, s32 offset:44 +; GFX11TRUE16-NEXT: scratch_load_b32 v38, off, s32 offset:40 +; GFX11TRUE16-NEXT: scratch_load_b32 v39, off, s32 offset:36 +; GFX11TRUE16-NEXT: scratch_load_b32 v48, off, s32 offset:32 +; GFX11TRUE16-NEXT: scratch_load_b32 v49, off, s32 offset:28 +; GFX11TRUE16-NEXT: scratch_load_b32 v50, off, s32 offset:24 +; GFX11TRUE16-NEXT: scratch_load_b32 v51, off, s32 offset:20 +; GFX11TRUE16-NEXT: scratch_load_b32 v52, off, s32 offset:16 +; GFX11TRUE16-NEXT: scratch_load_b32 v53, off, s32 offset:12 +; GFX11TRUE16-NEXT: scratch_load_b32 v54, off, s32 offset:8 +; GFX11TRUE16-NEXT: scratch_load_b32 v55, off, s32 offset:4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v99, 0xffff0000, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v100, 0xffff0000, v5 +; GFX11TRUE16-NEXT: v_and_b32_e32 v101, 0xffff0000, v20 +; GFX11TRUE16-NEXT: v_and_b32_e32 v102, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v115, 0xffff0000, v17 +; GFX11TRUE16-NEXT: 
v_and_b32_e32 v116, 0xffff0000, v1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v97, 0xffff0000, v22 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX11TRUE16-NEXT: v_and_b32_e32 v117, 0xffff0000, v16 +; GFX11TRUE16-NEXT: v_and_b32_e32 v118, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v103, 0xffff0000, v19 +; GFX11TRUE16-NEXT: v_and_b32_e32 v112, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: v_and_b32_e32 v85, 0xffff0000, v24 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX11TRUE16-NEXT: v_and_b32_e32 v113, 0xffff0000, v18 +; GFX11TRUE16-NEXT: v_and_b32_e32 v114, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(16) +; GFX11TRUE16-NEXT: v_and_b32_e32 v119, 0xffff0000, v31 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(15) +; GFX11TRUE16-NEXT: v_and_b32_e32 v128, 0xffff0000, v32 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(14) +; GFX11TRUE16-NEXT: v_and_b32_e32 v129, 0xffff0000, v33 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v33, 16, v33 +; GFX11TRUE16-NEXT: v_and_b32_e32 v68, 0xffff0000, v13 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(12) +; GFX11TRUE16-NEXT: v_and_b32_e32 v131, 0xffff0000, v35 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(10) +; GFX11TRUE16-NEXT: v_and_b32_e32 v133, 0xffff0000, v37 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(9) +; GFX11TRUE16-NEXT: v_and_b32_e32 v134, 0xffff0000, v38 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v37, 16, v37 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(7) +; GFX11TRUE16-NEXT: v_and_b32_e32 v144, 0xffff0000, v48 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v48, 16, v48 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(5) +; GFX11TRUE16-NEXT: v_and_b32_e32 v146, 0xffff0000, v50 +; GFX11TRUE16-NEXT: v_and_b32_e32 v145, 0xffff0000, v49 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v49, 16, v49 +; 
GFX11TRUE16-NEXT: s_waitcnt vmcnt(4) +; GFX11TRUE16-NEXT: v_and_b32_e32 v147, 0xffff0000, v51 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v51, 16, v51 +; GFX11TRUE16-NEXT: v_and_b32_e32 v96, 0xffff0000, v7 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11TRUE16-NEXT: v_and_b32_e32 v148, 0xffff0000, v55 +; GFX11TRUE16-NEXT: v_and_b32_e32 v87, 0xffff0000, v23 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v83, 0xffff0000, v25 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v146, v100, v99 :: v_dual_lshlrev_b32 v25, 16, v25 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v55, 16, v55 +; GFX11TRUE16-NEXT: v_and_b32_e32 v98, 0xffff0000, v6 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v84, 0xffff0000, v9 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v48, v7, v23 +; GFX11TRUE16-NEXT: v_and_b32_e32 v135, 0xffff0000, v39 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v39, 16, v39 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v49, v6, v22 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v134, v84, v83 :: v_dual_lshlrev_b32 v13, 16, v13 +; GFX11TRUE16-NEXT: v_bfe_u32 v83, v146, 16, 1 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v51, v4, v20 :: v_dual_fmac_f32 v148, v118, v117 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v144, v96, v87 :: v_dual_and_b32 v81, 0xffff0000, v26 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v55, v0, v16 :: v_dual_lshlrev_b32 v26, 16, v26 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v145, v98, v97 +; GFX11TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v146 +; GFX11TRUE16-NEXT: v_add3_u32 v83, v83, v146, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX11TRUE16-NEXT: v_and_b32_e32 v86, 0xffff0000, v8 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX11TRUE16-NEXT: v_and_b32_e32 v82, 0xffff0000, v10 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v147, v102, v101 :: v_dual_lshlrev_b32 v10, 16, v10 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v38, 16, v38 +; 
GFX11TRUE16-NEXT: v_and_b32_e32 v69, 0xffff0000, v28 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v37, v10, v26 :: v_dual_lshlrev_b32 v28, 16, v28 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v39, v8, v24 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v133, v82, v81 :: v_dual_and_b32 v70, 0xffff0000, v12 +; GFX11TRUE16-NEXT: v_bfe_u32 v97, v51, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v23, v37, 16, 1 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v135, v86, v85 :: v_dual_lshlrev_b32 v12, 16, v12 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v35, 16, v35 +; GFX11TRUE16-NEXT: v_and_b32_e32 v80, 0xffff0000, v11 +; GFX11TRUE16-NEXT: v_and_b32_e32 v132, 0xffff0000, v36 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v36, 16, v36 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v50, 16, v50 +; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v133 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX11TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v37 +; GFX11TRUE16-NEXT: v_or_b32_e32 v98, 0x400000, v51 +; GFX11TRUE16-NEXT: v_add3_u32 v23, v23, v37, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX11TRUE16-NEXT: v_and_b32_e32 v71, 0xffff0000, v27 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; GFX11TRUE16-NEXT: v_add3_u32 v97, v97, v51, 0x7fff +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v32 +; GFX11TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v15 +; GFX11TRUE16-NEXT: v_and_b32_e32 v130, 0xffff0000, v34 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v35, v12, v28 :: v_dual_lshlrev_b32 v34, 16, v34 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v36, v11, v27 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v50, v5, v21 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v132, v80, v71 :: v_dual_and_b32 v67, 0xffff0000, v29 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, 
v15 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v130, v68, v67 :: v_dual_and_b32 v65, 0xffff0000, v30 +; GFX11TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v36 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v34, v13, v29 :: v_dual_fmac_f32 v31, v15, v32 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v119, v64, v128 :: v_dual_and_b32 v66, 0xffff0000, v14 +; GFX11TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v52 +; GFX11TRUE16-NEXT: v_and_b32_e32 v128, 0xffff0000, v53 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v53, 16, v53 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v129, v66, v65 :: v_dual_lshlrev_b32 v30, 16, v30 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v52, 16, v52 +; GFX11TRUE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v54 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v54, 16, v54 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v64, v112, v103 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v38, v9, v25 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v131, v70, v69 :: v_dual_lshlrev_b32 v14, 16, v14 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v53, v2, v18 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v119, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v2, v31, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v33, v14, v30 :: v_dual_fmac_f32 v52, v3, v19 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v54, v1, v17 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v119 +; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v31 +; GFX11TRUE16-NEXT: v_bfe_u32 v4, v129, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v119, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v119, v119 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v31, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e64 s0, v31, v31 +; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v129 +; GFX11TRUE16-NEXT: v_bfe_u32 v6, v33, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v14, v132, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v15, v0, v1, vcc_lo +; GFX11TRUE16-NEXT: 
v_cndmask_b32_e64 v149, v2, v3, s0 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v4, v129, 0x7fff +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v129, v129 +; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v33 +; GFX11TRUE16-NEXT: v_bfe_u32 v8, v130, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v3, v6, v33, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v150, v14, v132, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v2, v5, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33 +; GFX11TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v130 +; GFX11TRUE16-NEXT: v_bfe_u32 v10, v34, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v13, v35, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v4, v8, v130, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v33, v3, v7, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v130, v130 +; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v34 +; GFX11TRUE16-NEXT: v_bfe_u32 v12, v131, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v6, v10, v34, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v10, v13, v35, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v4, v9, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34 +; GFX11TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v131 +; GFX11TRUE16-NEXT: v_add3_u32 v8, v12, v131, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v35 +; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v132 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v34, v6, v11, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v131, v131 +; GFX11TRUE16-NEXT: v_bfe_u32 v19, v36, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v21, v133, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v25, v134, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v134 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v12, v8, v16, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35 +; GFX11TRUE16-NEXT: v_add3_u32 v19, v19, v36, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v21, v21, v133, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v27, v38, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v25, v25, v134, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v16, 
v10, v17, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v132, v132 +; GFX11TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v38 +; GFX11TRUE16-NEXT: v_bfe_u32 v29, v135, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v27, v27, v38, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v30, 0x400000, v135 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v150, v18, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX11TRUE16-NEXT: v_bfe_u32 v65, v39, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v29, v29, v135, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v66, 0x400000, v39 +; GFX11TRUE16-NEXT: v_bfe_u32 v67, v144, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v17, v19, v20, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v133, v133 +; GFX11TRUE16-NEXT: v_add3_u32 v65, v65, v39, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v68, 0x400000, v144 +; GFX11TRUE16-NEXT: v_bfe_u32 v69, v48, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v67, v67, v144, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v10, v21, v22, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37 +; GFX11TRUE16-NEXT: v_or_b32_e32 v70, 0x400000, v48 +; GFX11TRUE16-NEXT: v_bfe_u32 v71, v145, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v69, v69, v48, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v80, 0x400000, v145 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v23, v24, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v134, v134 +; GFX11TRUE16-NEXT: v_bfe_u32 v81, v49, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v71, v71, v145, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v82, 0x400000, v49 +; GFX11TRUE16-NEXT: v_bfe_u32 v85, v50, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v9, v25, v26, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX11TRUE16-NEXT: v_add3_u32 v81, v81, v49, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v86, 0x400000, v50 +; GFX11TRUE16-NEXT: v_bfe_u32 v87, v147, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v85, v85, v50, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v19, v27, v28, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, 
v135, v135 +; GFX11TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v147 +; GFX11TRUE16-NEXT: v_add3_u32 v87, v87, v147, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v99, v64, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v64 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v29, v30, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39 +; GFX11TRUE16-NEXT: v_bfe_u32 v101, v52, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v99, v99, v64, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v102, 0x400000, v52 +; GFX11TRUE16-NEXT: v_bfe_u32 v117, v54, 16, 1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v20, v65, v66, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v144, v144 +; GFX11TRUE16-NEXT: v_add3_u32 v101, v101, v52, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v118, 0x400000, v54 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v55, 16, 1 +; GFX11TRUE16-NEXT: v_add3_u32 v117, v117, v54, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v67, v68, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v55 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v55, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v119, v148, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v148 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v21, v69, v70, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v145, v145 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v8.l, v20.h +; GFX11TRUE16-NEXT: v_add3_u32 v119, v119, v148, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v9.l, v19.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v71, v80, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v10.l, v18.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v11.l, v17.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v12.l, v16.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v13.l, v34.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v22, v81, v82, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v146, v146 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v14.l, v33.h +; GFX11TRUE16-NEXT: 
v_mov_b16_e64 v15.l, v149.h +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v22.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v83, v84, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v23, v85, v86, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v147, v147 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v23.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v87, v96, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v128, v114, v113 +; GFX11TRUE16-NEXT: v_bfe_u32 v113, v53, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v114, 0x400000, v53 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v24, v97, v98, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64 +; GFX11TRUE16-NEXT: v_bfe_u32 v103, v128, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v112, 0x400000, v128 +; GFX11TRUE16-NEXT: v_add3_u32 v113, v113, v53, 0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v99, v100, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52 +; GFX11TRUE16-NEXT: v_add3_u32 v103, v103, v128, 0x7fff +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v25, v101, v102, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v128, v128 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v32, v116, v115 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v25.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v103, v112, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53 +; GFX11TRUE16-NEXT: v_bfe_u32 v115, v32, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v116, 0x400000, v32 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v26, v113, v114, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_add3_u32 v115, v115, v32, 
0x7fff +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v26.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v27, v117, v118, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v28, v0, v1, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v115, v116, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v148, v148 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v27.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v119, v31, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v28.h +; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11FAKE16-LABEL: v_fma_v32bf16: +; GFX11FAKE16: ; %bb.0: +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: s_clause 0x10 +; GFX11FAKE16-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX11FAKE16-NEXT: scratch_load_b32 v32, off, s32 +; GFX11FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:60 +; GFX11FAKE16-NEXT: scratch_load_b32 v34, off, s32 offset:56 +; GFX11FAKE16-NEXT: scratch_load_b32 v35, off, s32 offset:52 +; GFX11FAKE16-NEXT: scratch_load_b32 v36, off, s32 offset:48 +; GFX11FAKE16-NEXT: scratch_load_b32 v37, off, s32 offset:44 +; GFX11FAKE16-NEXT: scratch_load_b32 v38, off, s32 offset:40 +; GFX11FAKE16-NEXT: scratch_load_b32 v39, off, s32 offset:36 +; GFX11FAKE16-NEXT: scratch_load_b32 v48, off, s32 offset:32 +; GFX11FAKE16-NEXT: scratch_load_b32 v49, off, s32 offset:28 +; GFX11FAKE16-NEXT: scratch_load_b32 v50, off, s32 offset:24 +; GFX11FAKE16-NEXT: scratch_load_b32 v51, off, s32 offset:20 +; GFX11FAKE16-NEXT: scratch_load_b32 v52, off, s32 offset:16 +; GFX11FAKE16-NEXT: scratch_load_b32 v53, off, s32 offset:12 +; GFX11FAKE16-NEXT: scratch_load_b32 v54, off, s32 offset:8 +; GFX11FAKE16-NEXT: scratch_load_b32 v55, off, s32 offset:4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v99, 16, v21 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v100, 16, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v97, 16, v22 +; GFX11FAKE16-NEXT: v_and_b32_e32 v22, 
0xffff0000, v22 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v101, 16, v20 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v102, 16, v4 +; GFX11FAKE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v20 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v117, 16, v16 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v118, 16, v0 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v87, 16, v23 +; GFX11FAKE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v23 +; GFX11FAKE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16 +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v98, 16, v6 +; GFX11FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v103, 16, v19 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v112, 16, v3 +; GFX11FAKE16-NEXT: v_and_b32_e32 v19, 0xffff0000, v19 +; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v85, 16, v24 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v113, 16, v18 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v114, 16, v2 +; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v115, 16, v17 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v116, 16, v1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v24 +; GFX11FAKE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(15) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v128, 16, v32 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(14) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v129, 16, v33 +; GFX11FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v33 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v68, 16, v13 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(12) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v131, 16, v35 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(10) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v133, 16, v37 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(9) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v134, 16, v38 +; GFX11FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v37 +; 
GFX11FAKE16-NEXT: s_waitcnt vmcnt(7) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v144, 16, v48 +; GFX11FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v48 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(5) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v146, 16, v50 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v145, 16, v49 +; GFX11FAKE16-NEXT: v_and_b32_e32 v49, 0xffff0000, v49 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v84, 16, v9 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(4) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v147, 16, v51 +; GFX11FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v51 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v96, 16, v7 +; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v83, 16, v25 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v146, v100, v99 :: v_dual_and_b32 v25, 0xffff0000, v25 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v135, 16, v39 +; GFX11FAKE16-NEXT: v_and_b32_e32 v39, 0xffff0000, v39 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v48, v7, v23 :: v_dual_fmac_f32 v49, v6, v22 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v134, v84, v83 :: v_dual_and_b32 v13, 0xffff0000, v13 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v51, v4, v20 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v144, v96, v87 :: v_dual_lshlrev_b32 v81, 16, v26 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v145, v98, v97 :: v_dual_and_b32 v26, 0xffff0000, v26 +; GFX11FAKE16-NEXT: v_or_b32_e32 v84, 0x400000, v146 +; GFX11FAKE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v86, 16, v8 +; GFX11FAKE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v82, 16, v10 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v147, v102, v101 :: v_dual_and_b32 v10, 0xffff0000, v10 +; GFX11FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9 +; GFX11FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v38 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v69, 16, v28 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v37, v10, v26 :: v_dual_and_b32 v28, 0xffff0000, v28 +; GFX11FAKE16-NEXT: 
v_fmac_f32_e32 v39, v8, v24 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v133, v82, v81 :: v_dual_lshlrev_b32 v70, 16, v12 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v135, v86, v85 :: v_dual_and_b32 v12, 0xffff0000, v12 +; GFX11FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v35 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v80, 16, v11 +; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v132, 16, v36 +; GFX11FAKE16-NEXT: v_and_b32_e32 v36, 0xffff0000, v36 +; GFX11FAKE16-NEXT: v_and_b32_e32 v50, 0xffff0000, v50 +; GFX11FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v133 +; GFX11FAKE16-NEXT: v_and_b32_e32 v21, 0xffff0000, v21 +; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v37 +; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v71, 16, v27 +; GFX11FAKE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v27 +; GFX11FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v32 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v130, 16, v34 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v35, v12, v28 :: v_dual_and_b32 v34, 0xffff0000, v34 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v36, v11, v27 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v50, v5, v21 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v132, v80, v71 :: v_dual_lshlrev_b32 v67, 16, v29 +; GFX11FAKE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v29 +; GFX11FAKE16-NEXT: v_or_b32_e32 v98, 0x400000, v51 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v119, 16, v31 +; GFX11FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v31 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v64, 16, v15 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v34, v13, v29 :: v_dual_and_b32 v15, 0xffff0000, v15 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v130, v68, v67 :: v_dual_lshlrev_b32 v65, 16, v30 +; GFX11FAKE16-NEXT: v_bfe_u32 v23, v37, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v31, v15, v32 :: v_dual_lshlrev_b32 v66, 16, v14 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v119, v64, v128 +; 
GFX11FAKE16-NEXT: s_waitcnt vmcnt(3) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v64, 16, v52 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(2) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v128, 16, v53 +; GFX11FAKE16-NEXT: v_and_b32_e32 v53, 0xffff0000, v53 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(1) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v54 +; GFX11FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v54 +; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v54, 16, v55 +; GFX11FAKE16-NEXT: v_and_b32_e32 v55, 0xffff0000, v55 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v129, v66, v65 :: v_dual_and_b32 v30, 0xffff0000, v30 +; GFX11FAKE16-NEXT: v_and_b32_e32 v52, 0xffff0000, v52 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v64, v112, v103 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v38, v9, v25 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v131, v70, v69 :: v_dual_and_b32 v14, 0xffff0000, v14 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v53, v2, v18 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v55, v0, v16 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v119, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v2, v31, 16, 1 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v33, v14, v30 :: v_dual_fmac_f32 v52, v3, v19 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v32, v1, v17 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v119 +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v31 +; GFX11FAKE16-NEXT: v_bfe_u32 v4, v129, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v119, 0x7fff +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v119, v119 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v31, 0x7fff +; GFX11FAKE16-NEXT: v_cmp_u_f32_e64 s0, v31, v31 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v128, v114, v113 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v54, v118, v117 +; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v129 +; GFX11FAKE16-NEXT: v_bfe_u32 v6, v33, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v10, v34, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v14, v35, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v19, v36, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v27, v38, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v65, v39, 16, 1 
+; GFX11FAKE16-NEXT: v_bfe_u32 v69, v48, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v81, v49, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v85, v50, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v97, v51, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v101, v52, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v113, v53, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v117, v32, 16, 1 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v148, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_cndmask_b32_e64 v149, v2, v3, s0 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v4, v129, 0x7fff +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v129, v129 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v15, v116, v115 +; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v33 +; GFX11FAKE16-NEXT: v_bfe_u32 v8, v130, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v3, v6, v33, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v6, v10, v34, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v10, v14, v35, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v14, v19, v36, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v19, v23, v37, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v23, v27, v38, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v27, v65, v39, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v65, v69, v48, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v69, v81, v49, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v81, v85, v50, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v85, v97, v51, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v97, v101, v52, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v101, v113, v53, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v113, v117, v32, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v117, v2, v5, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33 +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v130 +; GFX11FAKE16-NEXT: v_bfe_u32 v12, v131, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v17, v132, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v21, v133, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v25, v134, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v29, v135, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v67, v144, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v71, v145, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 
v83, v146, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v87, v147, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v99, v64, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v103, v128, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v115, v15, 16, 1 +; GFX11FAKE16-NEXT: v_bfe_u32 v119, v54, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v4, v8, v130, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v33, v3, v7, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v130, v130 +; GFX11FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v34 +; GFX11FAKE16-NEXT: v_add3_u32 v8, v12, v131, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v12, v17, v132, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v17, v21, v133, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v21, v25, v134, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v25, v29, v135, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v29, v67, v144, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v67, v71, v145, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v71, v83, v146, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v83, v87, v147, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v87, v99, v64, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v99, v103, v128, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v103, v115, v15, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v115, v119, v54, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v119, v4, v9, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34 +; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v131 +; GFX11FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v35 +; GFX11FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v132 +; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v36 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v34, v6, v11, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v131, v131 +; GFX11FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v134 +; GFX11FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v38 +; GFX11FAKE16-NEXT: v_or_b32_e32 v30, 0x400000, v135 +; GFX11FAKE16-NEXT: v_or_b32_e32 v66, 0x400000, v39 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v13, v8, v13, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35 +; GFX11FAKE16-NEXT: v_or_b32_e32 
v68, 0x400000, v144 +; GFX11FAKE16-NEXT: v_or_b32_e32 v70, 0x400000, v48 +; GFX11FAKE16-NEXT: v_or_b32_e32 v80, 0x400000, v145 +; GFX11FAKE16-NEXT: v_or_b32_e32 v82, 0x400000, v49 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v16, v10, v16, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v132, v132 +; GFX11FAKE16-NEXT: v_or_b32_e32 v86, 0x400000, v50 +; GFX11FAKE16-NEXT: v_or_b32_e32 v96, 0x400000, v147 +; GFX11FAKE16-NEXT: v_or_b32_e32 v100, 0x400000, v64 +; GFX11FAKE16-NEXT: v_or_b32_e32 v102, 0x400000, v52 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v18, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36 +; GFX11FAKE16-NEXT: v_or_b32_e32 v112, 0x400000, v128 +; GFX11FAKE16-NEXT: v_or_b32_e32 v116, 0x400000, v15 +; GFX11FAKE16-NEXT: v_or_b32_e32 v118, 0x400000, v32 +; GFX11FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v54 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v20, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v133, v133 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v55, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v55 +; GFX11FAKE16-NEXT: v_or_b32_e32 v114, 0x400000, v53 +; GFX11FAKE16-NEXT: v_perm_b32 v11, v12, v11, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v10, v17, v22, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v55, 0x7fff +; GFX11FAKE16-NEXT: v_perm_b32 v12, v16, v13, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v13, v34, v119, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v14, v19, v24, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v134, v134 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v10, v14, v10, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v9, v21, v26, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38 +; GFX11FAKE16-NEXT: v_perm_b32 v14, v33, v117, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v28, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v135, v135 +; GFX11FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v9, v17, v9, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v8, v25, v30, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v18, v27, v66, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v144, v144 +; GFX11FAKE16-NEXT: v_perm_b32 v8, v18, v8, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v7, v29, v68, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v19, v65, v70, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v145, v145 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v7, v19, v7, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v6, v67, v80, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v20, v69, v82, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v146, v146 +; GFX11FAKE16-NEXT: v_perm_b32 v6, v20, v6, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v5, v71, v84, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v21, v81, v86, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v147, v147 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v5, v21, v5, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v83, v96, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v87, v100, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v22, v97, v102, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v128, v128 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v3, v22, v3, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v99, v112, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15 +; GFX11FAKE16-NEXT: 
v_cndmask_b32_e32 v15, v103, v116, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v23, v113, v118, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v24, v115, v31, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53 +; GFX11FAKE16-NEXT: v_perm_b32 v1, v23, v15, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v15, v149, v148, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v24, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v25, v101, v114, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51 +; GFX11FAKE16-NEXT: v_perm_b32 v2, v25, v2, 0x7060302 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v26, v85, v98, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_perm_b32 v4, v26, v4, 0x7060302 +; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: v_fma_v32bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_clause 0x10 +; GFX1250-NEXT: scratch_load_b32 v31, off, s32 offset:64 +; GFX1250-NEXT: scratch_load_b32 v32, off, s32 offset:4 +; GFX1250-NEXT: scratch_load_b32 v33, off, s32 offset:8 +; GFX1250-NEXT: scratch_load_b32 v34, off, s32 offset:12 +; GFX1250-NEXT: scratch_load_b32 v35, off, s32 offset:16 +; GFX1250-NEXT: scratch_load_b32 v36, off, s32 offset:20 +; GFX1250-NEXT: scratch_load_b32 v37, off, s32 offset:24 +; GFX1250-NEXT: scratch_load_b32 v38, off, s32 offset:28 +; GFX1250-NEXT: scratch_load_b32 v39, off, s32 offset:32 +; GFX1250-NEXT: scratch_load_b32 v48, off, s32 offset:36 +; GFX1250-NEXT: scratch_load_b32 v49, off, s32 offset:40 +; GFX1250-NEXT: scratch_load_b32 v50, off, s32 offset:44 +; GFX1250-NEXT: scratch_load_b32 v51, off, s32 
offset:48 +; GFX1250-NEXT: scratch_load_b32 v52, off, s32 offset:52 +; GFX1250-NEXT: scratch_load_b32 v53, off, s32 offset:56 +; GFX1250-NEXT: scratch_load_b32 v54, off, s32 offset:60 +; GFX1250-NEXT: scratch_load_b32 v55, off, s32 +; GFX1250-NEXT: s_wait_loadcnt 0xf +; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v16, v32 +; GFX1250-NEXT: s_wait_loadcnt 0xe +; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v17, v33 +; GFX1250-NEXT: s_wait_loadcnt 0xd +; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v18, v34 +; GFX1250-NEXT: s_wait_loadcnt 0xc +; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v19, v35 +; GFX1250-NEXT: s_wait_loadcnt 0xb +; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v20, v36 +; GFX1250-NEXT: s_wait_loadcnt 0xa +; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v21, v37 +; GFX1250-NEXT: s_wait_loadcnt 0x9 +; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v22, v38 +; GFX1250-NEXT: s_wait_loadcnt 0x8 +; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v23, v39 +; GFX1250-NEXT: s_wait_loadcnt 0x7 +; GFX1250-NEXT: v_pk_fma_bf16 v8, v8, v24, v48 +; GFX1250-NEXT: s_wait_loadcnt 0x6 +; GFX1250-NEXT: v_pk_fma_bf16 v9, v9, v25, v49 +; GFX1250-NEXT: s_wait_loadcnt 0x5 +; GFX1250-NEXT: v_pk_fma_bf16 v10, v10, v26, v50 +; GFX1250-NEXT: s_wait_loadcnt 0x4 +; GFX1250-NEXT: v_pk_fma_bf16 v11, v11, v27, v51 +; GFX1250-NEXT: s_wait_loadcnt 0x3 +; GFX1250-NEXT: v_pk_fma_bf16 v12, v12, v28, v52 +; GFX1250-NEXT: s_wait_loadcnt 0x2 +; GFX1250-NEXT: v_pk_fma_bf16 v13, v13, v29, v53 +; GFX1250-NEXT: s_wait_loadcnt 0x1 +; GFX1250-NEXT: v_pk_fma_bf16 v14, v14, v30, v54 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_pk_fma_bf16 v15, v15, v55, v31 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %op = call <32 x bfloat> @llvm.fma.v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) ret <32 x bfloat> %op } diff --git a/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll b/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll index 12f8a59f0b84b..d89b39348ad9a 100644 --- a/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll 
+++ b/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll @@ -17,50 +17,50 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: early-clobber renamable $sgpr20_sgpr21_sgpr22_sgpr23 = S_LOAD_DWORDX4_IMM_ec renamable $sgpr8_sgpr9, 24, 0 :: (dereferenceable invariant load (s128) from %ir.arg6.kernarg.offset.align.down, align 8, addrspace 4) ; GFX90A-NEXT: renamable $sgpr33 = S_LOAD_DWORD_IMM renamable $sgpr8_sgpr9, 40, 0 :: (dereferenceable invariant load (s32) from %ir.arg6.kernarg.offset.align.down + 16, align 8, addrspace 4) ; GFX90A-NEXT: renamable $sgpr24_sgpr25_sgpr26_sgpr27 = S_LOAD_DWORDX4_IMM renamable $sgpr8_sgpr9, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset1, addrspace 4) - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_LOAD_DWORDX2_IMM renamable $sgpr8_sgpr9, 16, 0 :: (dereferenceable invariant load (s64) from %ir.arg.kernarg.offset1 + 16, align 16, addrspace 4) + ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_LOAD_DWORDX2_IMM renamable $sgpr8_sgpr9, 16, 0 :: (dereferenceable invariant load (s64) from %ir.arg.kernarg.offset1 + 16, align 16, addrspace 4) ; GFX90A-NEXT: S_BITCMP1_B32 renamable $sgpr17, 0, implicit-def $scc ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_CSELECT_B64 -1, 0, implicit killed $scc - ; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $sgpr28_sgpr29 = S_XOR_B64 renamable $sgpr12_sgpr13, -1, implicit-def dead $scc ; GFX90A-NEXT: S_BITCMP1_B32 renamable $sgpr17, 8, implicit-def $scc - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_CSELECT_B64 -1, 0, implicit killed $scc - ; GFX90A-NEXT: renamable $sgpr30_sgpr31 = S_XOR_B64 killed renamable $sgpr18_sgpr19, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr30_sgpr31 = S_CSELECT_B64 -1, 0, implicit killed $scc + ; GFX90A-NEXT: renamable $sgpr30_sgpr31 = S_XOR_B64 killed renamable $sgpr30_sgpr31, -1, implicit-def dead 
$scc ; GFX90A-NEXT: renamable $vgpr5 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr4 = DS_READ_B32_gfx9 renamable $vgpr5, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) null`, align 8, addrspace 3) - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr28_sgpr29, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCZ %bb.2, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.1.bb103: ; GFX90A-NEXT: successors: %bb.58(0x40000000), %bb.2(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr33, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x00000000000000FF, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr33, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr40_sgpr41, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x00000000000000FF, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr30_sgpr31, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.58, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.2: ; GFX90A-NEXT: successors: %bb.3(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8, $sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, 
$sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr46, $sgpr47, $sgpr20_sgpr21_sgpr22, $sgpr22_sgpr23, $sgpr24_sgpr25_sgpr26, $sgpr26_sgpr27, $vgpr4, $vgpr5 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8, $sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr40_sgpr41, $sgpr56, $sgpr57, $sgpr20_sgpr21_sgpr22, $sgpr22_sgpr23, $sgpr24_sgpr25_sgpr26, $sgpr26_sgpr27, $vgpr4, $vgpr5 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 ; GFX90A-NEXT: renamable $vgpr3 = IMPLICIT_DEF implicit-def $vgpr2 - ; GFX90A-NEXT: renamable $vgpr21 = IMPLICIT_DEF implicit-def $vgpr20 - ; GFX90A-NEXT: renamable $vgpr23 = IMPLICIT_DEF implicit-def $vgpr22 ; GFX90A-NEXT: renamable $vgpr25 = IMPLICIT_DEF implicit-def $vgpr24 + ; GFX90A-NEXT: renamable $vgpr27 = IMPLICIT_DEF implicit-def $vgpr26 + ; GFX90A-NEXT: renamable $vgpr29 = IMPLICIT_DEF implicit-def $vgpr28 ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 0 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.3.Flow17: ; GFX90A-NEXT: successors: %bb.4(0x40000000), %bb.57(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, 
$sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr40_sgpr41, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr26_vgpr27:0x000000000000000F, $vgpr28_vgpr29:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr30 = V_AND_B32_e32 1023, $vgpr31, implicit $exec - ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr34_sgpr35, implicit-def dead $scc + ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr18_sgpr19, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCZ %bb.57, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.4.bb15: ; GFX90A-NEXT: successors: %bb.35(0x40000000), %bb.5(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr40_sgpr41 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0_vgpr1 = V_LSHLREV_B64_e64 2, $vgpr4_vgpr5, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = COPY renamable $sgpr25, implicit $exec @@ -75,12 +75,12 @@ define 
amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.5: ; GFX90A-NEXT: successors: %bb.6(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr40_sgpr41 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_MOV_B64 0 @@ -98,47 +98,49 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: renamable $vgpr56_vgpr57 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr44_vgpr45 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr42_vgpr43 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = 
IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.6.Flow20: ; GFX90A-NEXT: successors: %bb.7(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, 
$sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr20 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr22 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr24 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr25 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr26 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr28 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr29 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr15 = COPY killed renamable $sgpr18, implicit $exec ; GFX90A-NEXT: renamable $vgpr3 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr21 = 
AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr23 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr25 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr27 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.7.Flow19: ; GFX90A-NEXT: successors: %bb.62(0x40000000), %bb.8(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, 
$sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr24_vgpr25:0x000000000000000F, $vgpr26_vgpr27:0x000000000000000F, $vgpr28_vgpr29:0x000000000000000F, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_MOV_B64 0 - ; GFX90A-NEXT: $sgpr24_sgpr25 = S_AND_SAVEEXEC_B64 $sgpr36_sgpr37, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: $sgpr18_sgpr19 = S_AND_SAVEEXEC_B64 $sgpr36_sgpr37, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.62, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.8.Flow32: ; GFX90A-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, 
$vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr24_sgpr25, implicit-def $scc - ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr18_sgpr19, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr18_sgpr19, implicit-def $scc + ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr40_sgpr41, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_XOR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_EXECZ %bb.10, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.9.bb89: ; GFX90A-NEXT: successors: %bb.10(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, 
$sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET renamable $vgpr11, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null` + 4, basealign 8, addrspace 5) ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr10, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null`, align 8, addrspace 5) @@ -146,16 +148,16 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.10.Flow33: ; 
GFX90A-NEXT: successors: %bb.11(0x40000000), %bb.12(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def $scc - ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr44_sgpr45, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr46_sgpr47, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_XOR_B64 $exec, killed renamable 
$sgpr12_sgpr13, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_EXECZ %bb.12, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.11.bb84: ; GFX90A-NEXT: successors: %bb.12(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET renamable $vgpr9, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null` + 4, basealign 8, addrspace 5) ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr8, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (store 
(s32) into `ptr addrspace(5) null`, align 8, addrspace 5) @@ -163,16 +165,16 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.12.Flow34: ; GFX90A-NEXT: successors: %bb.13(0x40000000), %bb.14(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def $scc - ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr42_sgpr43, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr44_sgpr45, implicit-def 
$exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_XOR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_EXECZ %bb.14, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.13.bb79: ; GFX90A-NEXT: successors: %bb.14(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET renamable $vgpr7, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null` + 4, basealign 8, addrspace 5) ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr6, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit 
$exec :: (store (s32) into `ptr addrspace(5) null`, align 8, addrspace 5) @@ -180,10 +182,10 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.14.Flow35: ; GFX90A-NEXT: successors: %bb.15(0x40000000), %bb.16(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr42_sgpr43, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $vgpr0_vgpr1:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def $scc - ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr40_sgpr41, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $sgpr42_sgpr43, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = 
S_XOR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_EXECZ %bb.16, implicit $exec ; GFX90A-NEXT: {{ $}} @@ -357,15 +359,15 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.35.bb20: ; GFX90A-NEXT: successors: %bb.37(0x40000000), %bb.36(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr40_sgpr41 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0 = FLAT_LOAD_SBYTE renamable $vgpr40_vgpr41, 1024, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i23) ; GFX90A-NEXT: renamable $vgpr42 = V_ADD_CO_U32_e32 1024, $vgpr40, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr38_sgpr39 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable 
$sgpr40_sgpr41 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_MOV_B64 0 @@ -383,33 +385,33 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: renamable $vgpr58_vgpr59 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr56_vgpr57 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr44_vgpr45 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: $sgpr24_sgpr25 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.37, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.36.Flow21: ; GFX90A-NEXT: successors: %bb.6(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, 
$sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, 
$vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr24_sgpr25, implicit-def $scc ; GFX90A-NEXT: S_BRANCH %bb.6 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.37.bb27: ; GFX90A-NEXT: successors: %bb.39(0x40000000), %bb.38(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19, $sgpr44_sgpr45, $sgpr42_sgpr43, $sgpr54_sgpr55, $sgpr52_sgpr53, $sgpr64_sgpr65, $sgpr50_sgpr51, $sgpr66_sgpr67 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr40_sgpr41, $sgpr46_sgpr47, $sgpr44_sgpr45, $sgpr64_sgpr65, $sgpr54_sgpr55, $sgpr52_sgpr53, $sgpr66_sgpr67, $sgpr48_sgpr49 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0 = FLAT_LOAD_UBYTE renamable $vgpr40_vgpr41, 2048, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i30) ; GFX90A-NEXT: renamable $vgpr44 = V_ADD_CO_U32_e32 2048, $vgpr40, implicit-def $vcc, 
implicit $exec - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_MOV_B64 -1 - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = COPY renamable $sgpr36_sgpr37 - ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr45, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec @@ -422,49 +424,51 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: renamable $vgpr60_vgpr61 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr58_vgpr59 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr56_vgpr57 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: $sgpr38_sgpr39 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.39, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.38.Flow22: ; GFX90A-NEXT: successors: %bb.36(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, 
$vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, 
$vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr38_sgpr39, implicit-def $scc ; GFX90A-NEXT: renamable $sgpr38_sgpr39 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr40_sgpr41, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr42_sgpr43, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr66_sgpr67, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_AND_B64 killed renamable $sgpr42_sgpr43, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable 
$sgpr62_sgpr63, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_AND_B64 killed renamable $sgpr44_sgpr45, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_AND_B64 killed renamable $sgpr18_sgpr19, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_AND_B64 killed renamable $sgpr40_sgpr41, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_ANDN2_B64 killed renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr56_sgpr57, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_OR_B64 killed renamable $sgpr36_sgpr37, killed renamable $sgpr46_sgpr47, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_OR_B64 killed renamable $sgpr36_sgpr37, killed renamable $sgpr56_sgpr57, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.36 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.39.bb34: ; GFX90A-NEXT: successors: %bb.41(0x40000000), %bb.40(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, 
$vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19, $sgpr44_sgpr45, $sgpr50_sgpr51, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr54_sgpr55, $sgpr62_sgpr63, $sgpr52_sgpr53, $sgpr64_sgpr65, $sgpr66_sgpr67 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr46_sgpr47, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr54_sgpr55, $sgpr52_sgpr53, $sgpr66_sgpr67 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0 = FLAT_LOAD_UBYTE renamable $vgpr40_vgpr41, 3072, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i37) ; GFX90A-NEXT: renamable $vgpr56 = V_ADD_CO_U32_e32 3072, $vgpr40, implicit-def $vcc, implicit $exec - ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 -1 - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = COPY renamable $sgpr36_sgpr37 - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = COPY renamable $sgpr36_sgpr37 + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr57, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr0, implicit $exec + ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF ; 
GFX90A-NEXT: renamable $vgpr6_vgpr7 = IMPLICIT_DEF @@ -472,48 +476,48 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: renamable $vgpr62_vgpr63 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr60_vgpr61 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr58_vgpr59 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: $sgpr40_sgpr41 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.41, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.40.Flow23: ; GFX90A-NEXT: successors: %bb.38(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, 
$vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr68_sgpr69, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable 
$sgpr40_sgpr41, implicit-def $scc - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr42_sgpr43, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_XOR_B64 $exec, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable $sgpr44_sgpr45, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr68_sgpr69, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr56_sgpr57, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_AND_B64 killed renamable $sgpr44_sgpr45, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_AND_B64 killed renamable $sgpr18_sgpr19, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr48_sgpr49, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: 
renamable $sgpr62_sgpr63 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_OR_B64 killed renamable $sgpr48_sgpr49, killed renamable $sgpr50_sgpr51, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.38 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.41.bb41: ; GFX90A-NEXT: successors: %bb.46(0x40000000), %bb.42(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr52_sgpr53, $sgpr50_sgpr51, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, 
$sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr52_sgpr53, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr54_sgpr55, $sgpr66_sgpr67, $sgpr68_sgpr69 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr58 = V_ADD_CO_U32_e32 4096, $vgpr40, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = COPY $vcc ; GFX90A-NEXT: renamable $vgpr59, dead renamable $sgpr18_sgpr19 = V_ADDC_U32_e64 0, $vgpr41, killed $sgpr18_sgpr19, 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr0 = FLAT_LOAD_UBYTE renamable $vgpr58_vgpr59, 0, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i44) - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 -1 - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = COPY renamable $sgpr36_sgpr37 + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $vgpr3, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr0, implicit $exec ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF @@ -522,271 +526,273 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr62_vgpr63 = IMPLICIT_DEF ; GFX90A-NEXT: renamable 
$vgpr60_vgpr61 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: $sgpr42_sgpr43 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.46, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.42.Flow24: ; GFX90A-NEXT: successors: %bb.40(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, 
$vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr42_sgpr43, implicit-def $scc ; GFX90A-NEXT: renamable $vgpr59 = COPY killed renamable $vgpr3, implicit $exec - ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_AND_B64 killed 
renamable $sgpr44_sgpr45, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_XOR_B64 $exec, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_AND_B64 killed renamable $sgpr56_sgpr57, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_AND_B64 killed renamable $sgpr18_sgpr19, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr48_sgpr49, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, 
implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_OR_B64 killed renamable $sgpr42_sgpr43, killed renamable $sgpr50_sgpr51, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.40 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.43.bb55: ; GFX90A-NEXT: successors: %bb.48(0x40000000), %bb.44(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr44_sgpr45, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57, $sgpr48_sgpr49 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr46_sgpr47, 
$sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr58_sgpr59, $sgpr48_sgpr49 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: S_BITCMP1_B32 killed renamable $sgpr17, 16, implicit-def $scc - ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_CSELECT_B64 -1, 0, implicit killed $scc - ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_XOR_B64 renamable $sgpr64_sgpr65, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_CSELECT_B64 -1, 0, implicit killed $scc + ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_XOR_B64 renamable $sgpr66_sgpr67, -1, implicit-def dead $scc ; GFX90A-NEXT: renamable $vgpr62 = V_ADD_CO_U32_e32 6144, $vgpr40, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $vgpr63, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec - ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr50_sgpr51, implicit-def dead $scc + ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr18_sgpr19, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.48, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.44: ; GFX90A-NEXT: successors: %bb.45(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr57, $vgpr56, $vgpr30, $vgpr31, $vgpr60, $vgpr62, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8, $sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $vgpr61, $vgpr58, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr20_sgpr21_sgpr22, $sgpr22_sgpr23, $sgpr24_sgpr25_sgpr26, $sgpr26_sgpr27, $vgpr47, $vgpr46, $vgpr2, $vgpr4, $vgpr5, $vgpr45, $vgpr44, $vgpr43, $vgpr42, $vgpr41, $vgpr40, $vgpr63 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr62, $vgpr56, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8, $sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, 
$sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $vgpr57, $vgpr61, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22, $sgpr22_sgpr23, $sgpr24_sgpr25_sgpr26, $sgpr26_sgpr27, $vgpr47, $vgpr46, $vgpr2, $vgpr4, $vgpr5, $vgpr45, $vgpr44, $vgpr43, $vgpr42, $vgpr41, $vgpr40, $vgpr60, $vgpr63, $vgpr58 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr6_vgpr7 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_MOV_B64 0 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.45.Flow26: ; GFX90A-NEXT: successors: %bb.47(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, 
$sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 - ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr70_sgpr71 = S_AND_B64 killed renamable $sgpr44_sgpr45, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr56_sgpr57, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, 
$sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_XOR_B64 $exec, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr70_sgpr71 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_OR_B64 killed renamable $sgpr44_sgpr45, killed renamable $sgpr46_sgpr47, implicit-def dead $scc + ; 
GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr50_sgpr51, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.47 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.46.bb48: ; GFX90A-NEXT: successors: %bb.43(0x40000000), %bb.47(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr64_sgpr65, $sgpr50_sgpr51, $sgpr66_sgpr67, $sgpr44_sgpr45, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, 
$vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr66_sgpr67, $sgpr58_sgpr59, $sgpr68_sgpr69, $sgpr64_sgpr65, $sgpr46_sgpr47, $sgpr54_sgpr55, $sgpr60_sgpr61 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr60 = V_ADD_CO_U32_e32 5120, $vgpr40, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = COPY $vcc ; GFX90A-NEXT: renamable $vgpr0 = V_ADD_CO_U32_e32 4096, $vgpr40, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $vgpr1, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr0 = FLAT_LOAD_UBYTE killed renamable $vgpr0_vgpr1, 1024, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i51) - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_MOV_B64 -1 - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = COPY renamable $sgpr36_sgpr37 - ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = COPY renamable $sgpr36_sgpr37 + ; GFX90A-NEXT: renamable $sgpr70_sgpr71 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr61, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $sgpr18_sgpr19, 0, implicit $exec ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr0, implicit $exec - ; GFX90A-NEXT: renamable $sgpr70_sgpr71 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr6_vgpr7 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr62_vgpr63 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = 
IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 - ; GFX90A-NEXT: $sgpr18_sgpr19 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF + ; GFX90A-NEXT: $sgpr44_sgpr45 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.43, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.47.Flow25: ; GFX90A-NEXT: successors: %bb.42(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $sgpr70_sgpr71, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, 
$vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 - ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr18_sgpr19, implicit-def $scc - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr60_sgpr61, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr70_sgpr71, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr68_sgpr69, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_AND_B64 killed renamable $sgpr66_sgpr67, $exec, implicit-def dead $scc + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr58_sgpr59, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr68_sgpr69, $sgpr70_sgpr71, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, 
$vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr44_sgpr45, implicit-def $scc + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_XOR_B64 $exec, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_AND_B64 killed renamable $sgpr70_sgpr71, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr68_sgpr69, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr66_sgpr67, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_AND_B64 killed renamable $sgpr48_sgpr49, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_OR_B64 killed renamable $sgpr46_sgpr47, 
killed renamable $sgpr48_sgpr49, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_OR_B64 killed renamable $sgpr44_sgpr45, killed renamable $sgpr50_sgpr51, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.42 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.48.bb63: ; GFX90A-NEXT: successors: %bb.50(0x40000000), %bb.49(0x40000000) - ; GFX90A-NEXT: liveins: $vcc, $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr50_sgpr51, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57, $sgpr48_sgpr49 + ; GFX90A-NEXT: liveins: $vcc, $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr56_sgpr57:0x000000000000000F, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, 
$vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr58_sgpr59, $sgpr48_sgpr49 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0 ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.50, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.49: ; GFX90A-NEXT: successors: %bb.44(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, 
$vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr58_sgpr59 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_MOV_B64 -1 ; GFX90A-NEXT: S_BRANCH %bb.44 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.50.bb68: ; GFX90A-NEXT: successors: %bb.54(0x40000000), %bb.51(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr50_sgpr51, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr56_sgpr57:0x000000000000000F, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, 
$vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr58_sgpr59 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0 = nuw nsw V_LSHLREV_B32_e32 3, $vgpr30, implicit $exec ; GFX90A-NEXT: renamable $vgpr1 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr50_sgpr51, implicit-def dead $scc + ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr18_sgpr19, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.54, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.51: ; GFX90A-NEXT: successors: %bb.45(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, 
$sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr58_sgpr59 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr6_vgpr7 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: S_BRANCH %bb.45 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.52.bb80: ; GFX90A-NEXT: successors: %bb.59(0x40000000), %bb.53(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, 
$sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr56_sgpr57:0x000000000000000F, $sgpr62_sgpr63, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr17 = S_BFE_U32 renamable $sgpr20, 65560, implicit-def dead $scc ; GFX90A-NEXT: S_CMP_EQ_U32 
killed renamable $sgpr17, 0, implicit-def $scc ; GFX90A-NEXT: renamable $vgpr8 = V_ADD_CO_U32_e32 4096, $vgpr0, implicit-def $vcc, implicit $exec - ; GFX90A-NEXT: renamable $vgpr9, dead renamable $sgpr52_sgpr53 = V_ADDC_U32_e64 0, 0, killed $vcc, 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr9, dead renamable $sgpr18_sgpr19 = V_ADDC_U32_e64 0, 0, killed $vcc, 0, implicit $exec ; GFX90A-NEXT: S_CBRANCH_SCC1 %bb.59, implicit killed $scc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.53: ; GFX90A-NEXT: successors: %bb.61(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, 
$vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 -1 - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = COPY renamable $sgpr36_sgpr37 + ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: S_BRANCH %bb.61 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.54.bb73: ; GFX90A-NEXT: successors: %bb.52(0x40000000), %bb.55(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, 
$sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr56_sgpr57:0x000000000000000F, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr60_sgpr61 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr8 = FLAT_LOAD_UBYTE renamable $vgpr0_vgpr1, 2048, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i76) ; GFX90A-NEXT: renamable $vgpr6 = V_ADD_CO_U32_e32 2048, $vgpr0, implicit-def $vcc, implicit $exec ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable 
$sgpr56_sgpr57 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = COPY renamable $sgpr36_sgpr37 - ; GFX90A-NEXT: renamable $vgpr7, dead renamable $sgpr58_sgpr59 = V_ADDC_U32_e64 0, 0, killed $vcc, 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr7, dead renamable $sgpr18_sgpr19 = V_ADDC_U32_e64 0, 0, killed $vcc, 0, implicit $exec ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr8, implicit $exec - ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $vgpr10_vgpr11 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 - ; GFX90A-NEXT: $sgpr60_sgpr61 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF + ; GFX90A-NEXT: $sgpr62_sgpr63 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.52, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.55.Flow29: ; GFX90A-NEXT: successors: %bb.45(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, 
$sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, 
$vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr60_sgpr61, implicit-def $scc + ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr62_sgpr63, implicit-def $scc ; GFX90A-NEXT: S_BRANCH %bb.45 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.56.bb90: ; GFX90A-NEXT: successors: %bb.60(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 - ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr53 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed $sgpr64_sgpr65, implicit $exec - ; GFX90A-NEXT: renamable $vgpr12 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - 
; GFX90A-NEXT: renamable $vgpr16_vgpr17 = DS_READ_B64_gfx9 killed renamable $vgpr12, 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) null`, addrspace 3) - ; GFX90A-NEXT: renamable $vgpr12 = COPY renamable $sgpr21, implicit $exec - ; GFX90A-NEXT: renamable $vgpr18_vgpr19 = DS_READ_B64_gfx9 killed renamable $vgpr12, 0, 0, implicit $exec :: (load (s64) from %ir.7, addrspace 3) - ; GFX90A-NEXT: renamable $vgpr12 = COPY renamable $sgpr22, implicit $exec - ; GFX90A-NEXT: renamable $vgpr14_vgpr15 = DS_READ_B64_gfx9 killed renamable $vgpr12, 0, 0, implicit $exec :: (load (s64) from %ir.8, addrspace 3) - ; GFX90A-NEXT: renamable $vgpr12 = COPY renamable $sgpr46, implicit $exec - ; GFX90A-NEXT: renamable $vgpr13 = V_ALIGNBIT_B32_opsel_e64 0, killed $sgpr47, 0, killed $vgpr12, 0, 1, 0, 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr52 = V_ALIGNBIT_B32_opsel_e64 0, $vgpr19, 0, $vgpr18, 0, 1, 0, 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr19 = V_CNDMASK_B32_e64 0, 0, 0, 1, $sgpr12_sgpr13, implicit $exec - ; GFX90A-NEXT: renamable $vgpr17 = V_ALIGNBIT_B32_opsel_e64 0, $vgpr17, 0, $vgpr16, 0, 1, 0, 0, implicit $exec + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr54_sgpr55, $sgpr56_sgpr57:0x000000000000000F, $sgpr62_sgpr63, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, 
$vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: renamable $vgpr12 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed $sgpr66_sgpr67, implicit $exec + ; GFX90A-NEXT: renamable $vgpr13 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr14 = COPY renamable $sgpr21, implicit $exec + ; GFX90A-NEXT: renamable $vgpr22_vgpr23 = DS_READ_B64_gfx9 killed renamable $vgpr14, 0, 0, implicit $exec :: (load (s64) from %ir.7, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr20_vgpr21 = DS_READ_B64_gfx9 killed renamable $vgpr13, 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr13 = COPY renamable $sgpr22, implicit $exec + ; GFX90A-NEXT: renamable $vgpr18_vgpr19 = DS_READ_B64_gfx9 killed renamable $vgpr13, 0, 0, implicit $exec :: (load (s64) from %ir.8, addrspace 3) + ; GFX90A-NEXT: renamable $sgpr18_sgpr19 = S_LSHR_B64 killed renamable $sgpr56_sgpr57, 1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $vgpr16_vgpr17 = V_LSHRREV_B64_e64 1, $vgpr22_vgpr23, implicit $exec + ; GFX90A-NEXT: renamable $vgpr17 = V_CNDMASK_B32_e64 0, 0, 0, 1, $sgpr12_sgpr13, implicit $exec + ; GFX90A-NEXT: renamable $vgpr32_vgpr33 = V_LSHRREV_B64_e64 1, $vgpr20_vgpr21, implicit $exec ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_OR_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $vgpr12 = COPY renamable $vgpr16, implicit $exec + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_OR_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $vgpr14 = COPY renamable $vgpr20, implicit $exec ; GFX90A-NEXT: S_BRANCH %bb.60 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.57: ; GFX90A-NEXT: successors: %bb.7(0x80000000) 
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr40_sgpr41, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr26_vgpr27:0x000000000000000F, $vgpr28_vgpr29:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr19 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr52 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr22 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr18 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr14 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr19 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr12 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr20 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr32 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr16 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr15 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: 
renamable $vgpr53 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 0 - ; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_MOV_B64 0 ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_MOV_B64 0 @@ -812,79 +818,80 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.58.bb105: ; GFX90A-NEXT: successors: %bb.3(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr33, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x00000000000000FF, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr33, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr40_sgpr41, $sgpr56_sgpr57:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x00000000000000FF, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr4_vgpr5:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr0 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr22_vgpr23 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr26_vgpr27 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) null`, addrspace 3) ; GFX90A-NEXT: renamable $vgpr0 = COPY renamable $sgpr23, implicit $exec - ; 
GFX90A-NEXT: renamable $vgpr20_vgpr21 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.419, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr24_vgpr25 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.419, addrspace 3) ; GFX90A-NEXT: renamable $vgpr0 = COPY renamable $sgpr21, implicit $exec ; GFX90A-NEXT: renamable $vgpr2_vgpr3 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.7, addrspace 3) ; GFX90A-NEXT: renamable $vgpr0 = COPY killed renamable $sgpr33, implicit $exec - ; GFX90A-NEXT: renamable $vgpr12_vgpr13 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.420, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr14_vgpr15 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.420, addrspace 3) ; GFX90A-NEXT: renamable $vgpr0 = COPY renamable $sgpr22, implicit $exec - ; GFX90A-NEXT: renamable $vgpr24_vgpr25 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.8, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr28_vgpr29 = DS_READ_B64_gfx9 killed renamable $vgpr0, 0, 0, implicit $exec :: (load (s64) from %ir.8, addrspace 3) ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 -1 ; GFX90A-NEXT: S_BRANCH %bb.3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.59.bb85: ; GFX90A-NEXT: successors: %bb.56(0x40000000), %bb.60(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, 
$vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr56_sgpr57:0x000000000000000F, $sgpr62_sgpr63, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr10 = V_OR_B32_e32 1, $vgpr8, implicit $exec ; GFX90A-NEXT: renamable $vgpr11 = COPY renamable $vgpr9, implicit $exec ; GFX90A-NEXT: renamable $vgpr12 = FLAT_LOAD_UBYTE renamable $vgpr10_vgpr11, 0, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i86) ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr12, implicit $exec - ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = COPY renamable $sgpr36_sgpr37 - ; 
GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = COPY renamable $sgpr36_sgpr37 ; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr32 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr20 = IMPLICIT_DEF ; GFX90A-NEXT: renamable $vgpr16 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr52 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr18 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF - ; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF implicit-def $vgpr14 - ; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12 + ; GFX90A-NEXT: renamable $vgpr22 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr12 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr19 = IMPLICIT_DEF implicit-def $vgpr18 + ; GFX90A-NEXT: renamable $sgpr18 = IMPLICIT_DEF + ; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF ; GFX90A-NEXT: $sgpr54_sgpr55 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.56, implicit $exec ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.60.Flow31: ; GFX90A-NEXT: successors: %bb.61(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, 
$vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr54_sgpr55, implicit-def $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_MOV_B64 0 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.61.Flow30: ; GFX90A-NEXT: successors: %bb.55(0x80000000) - ; 
GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr3, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19:0x0000000000000003, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr62_sgpr63, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x0000000000000003, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, 
$vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_XOR_B64 $exec, -1, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_XOR_B64 $exec, -1, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr60_sgpr61 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc - ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr52_sgpr53, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_AND_B64 killed renamable $sgpr64_sgpr65, $exec, implicit-def dead $scc + ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_OR_B64 killed renamable $sgpr52_sgpr53, killed renamable $sgpr56_sgpr57, implicit-def dead $scc ; GFX90A-NEXT: S_BRANCH %bb.55 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.62.bb140: ; GFX90A-NEXT: successors: %bb.68(0x40000000), %bb.63(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, 
$sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, 
$vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr24_vgpr25:0x000000000000000F, $vgpr26_vgpr27:0x000000000000000F, $vgpr28_vgpr29:0x000000000000000F, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 -1 + ; GFX90A-NEXT: renamable $sgpr24_sgpr25 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr30_sgpr31, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.68, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.63.Flow13: ; GFX90A-NEXT: successors: %bb.64(0x40000000), %bb.66(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, 
$vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: $vcc = S_ANDN2_B64 $exec, killed renamable $sgpr36_sgpr37, implicit-def dead $scc + ; GFX90A-NEXT: $vcc = S_ANDN2_B64 $exec, killed renamable $sgpr24_sgpr25, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.66, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.64.bb159: ; GFX90A-NEXT: successors: %bb.67(0x40000000), %bb.65(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, 
$vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vcc = V_CMP_NE_U32_e64 0, killed $vgpr30, implicit $exec ; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec @@ -893,104 +900,106 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, 
ptr addrspace(1) %arg1, i64 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.65.Flow10: ; GFX90A-NEXT: successors: %bb.66(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $sgpr12_sgpr13 = S_ANDN2_SAVEEXEC_B64 $sgpr12_sgpr13, implicit-def $exec, implicit-def $scc, implicit $exec ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def $scc ; GFX90A-NEXT: {{ $}} ; 
GFX90A-NEXT: bb.66.Flow14: ; GFX90A-NEXT: successors: %bb.8(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $sgpr68_sgpr69 = COPY $exec ; GFX90A-NEXT: S_BRANCH %bb.8 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.67.bb161: ; GFX90A-NEXT: successors: %bb.65(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, 
$sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, 
$sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr21, killed $vgpr23, implicit $exec - ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr2, killed $vgpr25, implicit $exec - ; GFX90A-NEXT: renamable $vgpr3 = V_OR_B32_e32 killed $vgpr13, killed $vgpr3, implicit $exec + ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr25, killed $vgpr27, implicit $exec + ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr2, killed $vgpr29, implicit $exec + ; GFX90A-NEXT: renamable $vgpr3 = V_OR_B32_e32 killed $vgpr15, killed $vgpr3, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr3, killed $vgpr2, implicit $exec ; GFX90A-NEXT: renamable $vgpr3 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U32_sdwa 0, killed $vgpr53, 0, $vgpr3, 0, 0, 6, implicit $exec + ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U32_sdwa 0, killed $vgpr12, 0, $vgpr3, 0, 0, 6, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = V_CNDMASK_B32_e64 0, 0, 0, killed $vgpr2, killed $vcc, implicit $exec - ; GFX90A-NEXT: renamable $vgpr4 = V_OR_B32_e32 killed $vgpr52, killed $vgpr15, implicit $exec + ; GFX90A-NEXT: renamable $vgpr4 = V_OR_B32_e32 killed $vgpr16, killed $vgpr19, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr4, killed $vgpr2, implicit $exec - ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U32_sdwa 0, killed $vgpr19, 0, $vgpr3, 0, 0, 6, implicit $exec + ; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U32_sdwa 0, killed $vgpr17, 0, $vgpr3, 0, 0, 6, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = V_CNDMASK_B32_e64 0, 0, 0, killed $vgpr2, killed $vcc, implicit $exec - ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr2, killed $vgpr17, implicit $exec + ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr2, killed $vgpr32, implicit $exec ; GFX90A-NEXT: DS_WRITE2_B32_gfx9 killed renamable $vgpr3, killed renamable $vgpr2, renamable $vgpr3, 0, 1, 
0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, align 4, addrspace 3) ; GFX90A-NEXT: S_BRANCH %bb.65 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.68.bb174: ; GFX90A-NEXT: successors: %bb.72(0x40000000), %bb.69(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 - ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr26 = V_OR_B32_e32 1, $vgpr24, implicit $exec - ; GFX90A-NEXT: renamable $vgpr48 = V_OR_B32_e32 $vgpr26, $vgpr22, implicit $exec - ; GFX90A-NEXT: renamable $vgpr34 = V_OR_B32_e32 $vgpr48, $vgpr20, implicit $exec - ; GFX90A-NEXT: renamable $vgpr28 = V_CNDMASK_B32_e64 0, $vgpr34, 0, 0, $sgpr12_sgpr13, implicit $exec - ; GFX90A-NEXT: renamable $vgpr38 = V_OR_B32_e32 $vgpr28, $vgpr2, implicit $exec - ; GFX90A-NEXT: renamable $vgpr36 = 
V_OR_B32_e32 $vgpr38, $vgpr12, implicit $exec - ; GFX90A-NEXT: renamable $vgpr32 = V_OR_B32_e32 $vgpr36, $vgpr14, implicit $exec - ; GFX90A-NEXT: renamable $vgpr50 = V_CNDMASK_B32_e64 0, 0, 0, $vgpr32, killed $sgpr12_sgpr13, implicit $exec + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000F, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr24_vgpr25:0x000000000000000F, $vgpr26_vgpr27:0x000000000000000F, $vgpr28_vgpr29:0x000000000000000F, $vgpr32_vgpr33:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: renamable $agpr0 = COPY killed renamable $vgpr32, implicit $exec + ; GFX90A-NEXT: renamable $vgpr32 = V_OR_B32_e32 1, $vgpr28, implicit $exec + ; GFX90A-NEXT: renamable $vgpr52 = V_OR_B32_e32 $vgpr32, $vgpr26, implicit $exec + ; GFX90A-NEXT: renamable $vgpr38 = V_OR_B32_e32 $vgpr52, $vgpr24, implicit $exec + ; GFX90A-NEXT: renamable $vgpr34 = V_CNDMASK_B32_e64 0, $vgpr38, 0, 0, 
$sgpr12_sgpr13, implicit $exec + ; GFX90A-NEXT: renamable $vgpr50 = V_OR_B32_e32 $vgpr34, $vgpr2, implicit $exec + ; GFX90A-NEXT: renamable $vgpr48 = V_OR_B32_e32 $vgpr50, $vgpr14, implicit $exec + ; GFX90A-NEXT: renamable $vgpr36 = V_OR_B32_e32 $vgpr48, $vgpr18, implicit $exec + ; GFX90A-NEXT: renamable $vgpr54 = V_CNDMASK_B32_e64 0, 0, 0, $vgpr36, killed $sgpr12_sgpr13, implicit $exec ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_MOV_B64 -1 ; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr28_sgpr29, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.72, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.69.Flow: ; GFX90A-NEXT: successors: %bb.70(0x40000000), %bb.71(0x40000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x0000000000000003, $vgpr28_vgpr29:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, 
$vgpr50_vgpr51:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $agpr0_agpr1:0x0000000000000003, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr52_vgpr53:0x0000000000000003, $vgpr54_vgpr55:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: $vcc = S_ANDN2_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.71, implicit $vcc ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.70.bb186: ; GFX90A-NEXT: successors: %bb.71(0x80000000) - ; 
GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x0000000000000003, $vgpr28_vgpr29:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $agpr0_agpr1:0x0000000000000003, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, 
$vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr52_vgpr53:0x0000000000000003, $vgpr54_vgpr55:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: renamable $vgpr4_vgpr5 = V_LSHLREV_B64_e64 3, killed $vgpr4_vgpr5, implicit $exec ; GFX90A-NEXT: renamable $vgpr2 = COPY renamable $sgpr27, implicit $exec ; GFX90A-NEXT: renamable $vgpr4, renamable $vcc = V_ADD_CO_U32_e64 killed $sgpr26, $vgpr4, 0, implicit $exec ; GFX90A-NEXT: renamable $vgpr2, dead renamable $vcc = V_ADDC_U32_e64 killed $vgpr2, killed $vgpr5, killed $vcc, 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr27 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: renamable $vgpr49 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr35 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr39 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr37 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr29 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr51 = COPY renamable $vgpr27, implicit $exec - ; GFX90A-NEXT: renamable $vgpr33 = COPY renamable $vgpr27, implicit 
$exec - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr27, renamable $vgpr26_vgpr27, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr33 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: renamable $vgpr53 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr39 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr51 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr49 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr35 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr55 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: renamable $vgpr37 = COPY renamable $vgpr33, implicit $exec + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr33, renamable $vgpr32_vgpr33, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) ; GFX90A-NEXT: renamable $vgpr5 = COPY renamable $sgpr21, implicit $exec + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr5, killed renamable $vgpr52_vgpr53, 0, 0, implicit $exec :: (store (s64) into %ir.7, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr13 = COPY killed renamable $sgpr22, implicit $exec + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr13, killed renamable $vgpr38_vgpr39, 0, 0, implicit $exec :: (store (s64) into %ir.8, addrspace 3) + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr33, killed renamable $vgpr50_vgpr51, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr5, killed renamable $vgpr48_vgpr49, 0, 0, implicit $exec :: (store (s64) into %ir.7, addrspace 3) - ; GFX90A-NEXT: renamable $vgpr12 = COPY killed renamable $sgpr22, implicit $exec - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr12, killed renamable $vgpr34_vgpr35, 0, 0, implicit $exec :: (store (s64) into %ir.8, addrspace 3) - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable 
$vgpr27, killed renamable $vgpr38_vgpr39, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr5, killed renamable $vgpr36_vgpr37, 0, 0, implicit $exec :: (store (s64) into %ir.7, addrspace 3) - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr27, killed renamable $vgpr28_vgpr29, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr5, killed renamable $vgpr50_vgpr51, 0, 0, implicit $exec :: (store (s64) into %ir.7, addrspace 3) - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr27, killed renamable $vgpr32_vgpr33, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 renamable $vgpr33, killed renamable $vgpr34_vgpr35, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr5, killed renamable $vgpr54_vgpr55, 0, 0, implicit $exec :: (store (s64) into %ir.7, addrspace 3) + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr33, killed renamable $vgpr36_vgpr37, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null` + 4, basealign 8, addrspace 5) ; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null`, align 8, addrspace 5) ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.71.Flow9: ; GFX90A-NEXT: successors: %bb.63(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, 
$sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $agpr0_agpr1:0x0000000000000003, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 0 + ; GFX90A-NEXT: renamable $sgpr24_sgpr25 
= S_MOV_B64 0 + ; GFX90A-NEXT: renamable $vgpr32 = COPY killed renamable $agpr0, implicit $exec ; GFX90A-NEXT: S_BRANCH %bb.63 ; GFX90A-NEXT: {{ $}} ; GFX90A-NEXT: bb.72.bb196: ; GFX90A-NEXT: successors: %bb.69(0x80000000) - ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr17, $vgpr19, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x0000000000000003, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x0000000000000003, $vgpr28_vgpr29:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 + ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr12, $vgpr17, $vgpr30, $vgpr31, $agpr0_agpr1:0x0000000000000003, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr34_sgpr35, $sgpr38_sgpr39, 
$sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000C, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr14_vgpr15:0x000000000000000C, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr20_vgpr21:0x0000000000000003, $vgpr22_vgpr23:0x0000000000000003, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x000000000000000C, $vgpr28_vgpr29:0x000000000000000C, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr52_vgpr53:0x0000000000000003, $vgpr54_vgpr55:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3 ; GFX90A-NEXT: {{ $}} - ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 $vgpr50, killed $vgpr18, implicit $exec - ; GFX90A-NEXT: renamable $vgpr54 = V_OR_B32_e32 killed $vgpr2, killed $vgpr16, implicit $exec - ; GFX90A-NEXT: renamable $vgpr55 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec - ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr55, renamable $vgpr54_vgpr55, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) + ; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 $vgpr54, killed $vgpr22, implicit $exec + ; GFX90A-NEXT: renamable $vgpr20 = V_OR_B32_e32 killed $vgpr2, killed $vgpr20, implicit $exec + ; GFX90A-NEXT: renamable $vgpr21 = 
AV_MOV_B32_IMM_PSEUDO 0, implicit $exec + ; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr21, renamable $vgpr20_vgpr21, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3) ; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_MOV_B64 0 ; GFX90A-NEXT: S_BRANCH %bb.69 bb: diff --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll index 306fe33bfb7ac..8e12e7e03947b 100644 --- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll +++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll @@ -2523,7 +2523,7 @@ define amdgpu_kernel void @test_call_external_void_func_v3i16() #0 { ; CI-NEXT: s_add_u32 s36, s36, s3 ; CI-NEXT: s_mov_b32 s3, 0xf000 ; CI-NEXT: s_mov_b32 s2, -1 -; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0 +; CI-NEXT: buffer_load_dwordx2 v[3:4], off, s[0:3], 0 ; CI-NEXT: s_addc_u32 s37, s37, 0 ; CI-NEXT: s_mov_b64 s[6:7], s[0:1] ; CI-NEXT: s_mov_b64 s[0:1], s[36:37] @@ -2533,9 +2533,9 @@ define amdgpu_kernel void @test_call_external_void_func_v3i16() #0 { ; CI-NEXT: s_mov_b64 s[2:3], s[38:39] ; CI-NEXT: s_mov_b32 s32, 0 ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_alignbit_b32 v1, v3, v2, 16 -; CI-NEXT: v_mov_b32_e32 v0, v2 -; CI-NEXT: v_mov_b32_e32 v2, v3 +; CI-NEXT: v_lshr_b64 v[1:2], v[3:4], 16 +; CI-NEXT: v_mov_b32_e32 v0, v3 +; CI-NEXT: v_mov_b32_e32 v2, v4 ; CI-NEXT: s_swappc_b64 s[30:31], s[4:5] ; CI-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll b/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll index a92b99aa1e2c1..1c5f8c84e447d 100644 --- a/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll +++ b/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll @@ -173,11 +173,12 @@ ret: ; GCN-LABEL: {{^}}sink_ubfe_i64_span_midpoint: ; GCN: s_cbranch_scc{{[0-1]}} .LBB3_2 -; GCN: v_alignbit_b32 v[[LO:[0-9]+]], s{{[0-9]+}}, v{{[0-9]+}}, 30 -; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x7f, v[[LO]] +; GCN: s_lshr_b64 s[[[LO:[0-9]+]]:[[HI:[0-9]+]]], 
s[[[LO2:[0-9]+]]:[[HI2:[0-9]+]]], 30 +; GCN: s_and_b32 s{{[0-9]+}}, s[[LO]], 0x7f ; GCN: .LBB3_3: -; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xff, v[[LO]] +; GCN: s_lshr_b64 s[[[LO3:[0-9]+]]:[[HI3:[0-9]+]]], s[[[LO4:[0-9]+]]:[[HI4:[0-9]+]]], 30 +; GCN: s_and_b32 s{{[0-9]+}}, s[[LO3]], 0xff ; GCN: buffer_store_dwordx2 define amdgpu_kernel void @sink_ubfe_i64_span_midpoint(ptr addrspace(1) %out, i64 %arg1, i1 %arg) #0 { diff --git a/llvm/test/CodeGen/AMDGPU/fcanonicalize.bf16.ll b/llvm/test/CodeGen/AMDGPU/fcanonicalize.bf16.ll index a4cdb0387df9a..d747fb7cce7dc 100644 --- a/llvm/test/CodeGen/AMDGPU/fcanonicalize.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/fcanonicalize.bf16.ll @@ -15,67 +15,67 @@ declare <32 x bfloat> @llvm.canonicalize.v32bf16(<32 x bfloat>) #0 declare <64 x bfloat> @llvm.canonicalize.v64bf16(<64 x bfloat>) #0 declare i32 @llvm.amdgcn.workitem.id.x() #0 -; GFX1250-LABEL: test_fold_canonicalize_undef_value_bf16: -; GFX1250: %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v0, s[0:1] -; GFX1250-NEXT: s_endpgm define amdgpu_kernel void @test_fold_canonicalize_undef_value_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_undef_value_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v0, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat undef) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: v_test_canonicalize_var_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_u16 v0, v0, s[0:1] -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX1250-NEXT: s_delay_alu 
instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX1250-NEXT: global_store_b16 v[0:1], v0, off -; GFX1250-NEXT: s_endpgm define amdgpu_kernel void @v_test_canonicalize_var_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v0, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: global_store_b16 v[0:1], v0, off +; GFX1250-NEXT: s_endpgm %val = load bfloat, ptr addrspace(1) %out %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val) store bfloat %canonicalized, ptr addrspace(1) poison ret void } -; GFX1250-LABEL: s_test_canonicalize_var_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v1, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_lshl_b32 s2, s2, 16 -; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_max_num_f32_e64 v0, s2, s2 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX1250-NEXT: global_store_b16 v1, v0, s[0:1] -; GFX1250-NEXT: s_endpgm define amdgpu_kernel void @s_test_canonicalize_var_bf16(ptr addrspace(1) %out, i16 zeroext %val.arg) #1 { +; GFX1250-LABEL: s_test_canonicalize_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v1, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_lshl_b32 s2, s2, 16 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: 
v_max_num_f32_e64 v0, s2, s2 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: global_store_b16 v1, v0, s[0:1] +; GFX1250-NEXT: s_endpgm %val = bitcast i16 %val.arg to bfloat %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: v_test_canonicalize_build_vector_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v0, v0, v0 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] define <2 x bfloat> @v_test_canonicalize_build_vector_v2bf16(bfloat %lo, bfloat %hi) #1 { +; GFX1250-LABEL: v_test_canonicalize_build_vector_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ins0 = insertelement <2 x bfloat> poison, bfloat %lo, i32 0 %ins1 = insertelement <2 x bfloat> %ins0, bfloat %hi, i32 1 %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %ins1) @@ -83,22 +83,22 @@ define <2 x bfloat> @v_test_canonicalize_build_vector_v2bf16(bfloat %lo, bfloat } -; GFX1250-LABEL: v_test_canonicalize_fabs_var_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_and_b32_e32 
v1, 0x7fff, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm define amdgpu_kernel void @v_test_canonicalize_fabs_var_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fabs_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %val = load bfloat, ptr addrspace(1) %out %val.fabs = call bfloat @llvm.fabs.bf16(bfloat %val) %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val.fabs) @@ -107,22 +107,22 @@ define amdgpu_kernel void @v_test_canonicalize_fabs_var_bf16(ptr addrspace(1) %o } -; GFX1250-LABEL: v_test_canonicalize_fneg_fabs_var_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_or_b32_e32 v1, 0x8000, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, 
s0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fneg_fabs_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_or_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %val = load bfloat, ptr addrspace(1) %out %val.fabs = call bfloat @llvm.fabs.bf16(bfloat %val) %val.fabs.fneg = fneg bfloat %val.fabs @@ -131,22 +131,22 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_bf16(ptr addrspace( ret void } -; GFX1250-LABEL: v_test_canonicalize_fneg_var_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_xor_b32_e32 v1, 0x8000, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm define amdgpu_kernel void @v_test_canonicalize_fneg_var_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fneg_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; 
GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_xor_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %val = load bfloat, ptr addrspace(1) %out %val.fneg = fneg bfloat %val %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val.fneg) @@ -154,22 +154,22 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_var_bf16(ptr addrspace(1) %o ret void } -; GFX1250-LABEL: v_test_no_denormals_canonicalize_fneg_var_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_xor_b32_e32 v1, 0x8000, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm define amdgpu_kernel void @v_test_no_denormals_canonicalize_fneg_var_bf16(ptr addrspace(1) %out) #2 { +; GFX1250-LABEL: v_test_no_denormals_canonicalize_fneg_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_xor_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %val = load bfloat, ptr addrspace(1) %out %val.fneg = fneg bfloat %val %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat %val.fneg) @@ -177,22 +177,22 @@ define amdgpu_kernel void @v_test_no_denormals_canonicalize_fneg_var_bf16(ptr ad ret void } -; GFX1250-LABEL: v_test_no_denormals_canonicalize_fneg_fabs_var_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_or_b32_e32 v1, 0x8000, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm define amdgpu_kernel void @v_test_no_denormals_canonicalize_fneg_fabs_var_bf16(ptr addrspace(1) %out) #2 { +; GFX1250-LABEL: v_test_no_denormals_canonicalize_fneg_fabs_var_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_u16 v1, v0, s[0:1] +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_or_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: global_store_b16 v0, v1, 
s[0:1] +; GFX1250-NEXT: s_endpgm %val = load bfloat, ptr addrspace(1) %out %val.fabs = call bfloat @llvm.fabs.bf16(bfloat %val) %val.fabs.fneg = fneg bfloat %val.fabs @@ -201,217 +201,231 @@ define amdgpu_kernel void @v_test_no_denormals_canonicalize_fneg_fabs_var_bf16(p ret void } +define amdgpu_kernel void @test_fold_canonicalize_p0_bf16(ptr addrspace(1) %out) #1 { ; GFX1250-LABEL: test_fold_canonicalize_p0_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v0, s[0:1] -; GFX1250-NEXT: s_endpgm - define amdgpu_kernel void @test_fold_canonicalize_p0_bf16(ptr addrspace(1) %out) #1 { +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v0, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0.0) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_n0_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffff8000 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm -; GFX1250-NEXT: .Lfunc_end10: + define amdgpu_kernel void @test_fold_canonicalize_n0_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_n0_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffff8000 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat -0.0) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_p1_bf16: -; GFX1250: ; %bb.0: -; 
GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3f80 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_p1_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_p1_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3f80 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 1.0) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_n1_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffffbf80 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm -; GFX1250-NEXT: .Lfunc_end12: + define amdgpu_kernel void @test_fold_canonicalize_n1_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_n1_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffffbf80 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat -1.0) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_literal_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x4180 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_literal_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: 
test_fold_canonicalize_literal_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x4180 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 16.0) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_default_denormals_fold_canonicalize_denormal0_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_default_denormals_fold_canonicalize_denormal0_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_default_denormals_fold_canonicalize_denormal0_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR03FF) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal0_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal0_bf16(ptr addrspace(1) %out) #3 { +; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal0_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] 
+; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR03FF) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_default_denormals_fold_canonicalize_denormal1_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffff83ff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_default_denormals_fold_canonicalize_denormal1_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_default_denormals_fold_canonicalize_denormal1_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffff83ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR83FF) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal1_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffff83ff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal1_bf16(ptr addrspace(1) %out) #3 { +; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal1_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xffff83ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR83FF) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_qnan_bf16: -; GFX1250: ; %bb.0: -; 
GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c00 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_qnan_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c00 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR7C00) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg1_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg1_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg1_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat bitcast (i16 -1 to bfloat)) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg2_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg2_bf16(ptr 
addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg2_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat bitcast (i16 -2 to bfloat)) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_snan0_value_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c01 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_snan0_value_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan0_value_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c01 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR7C01) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_snan1_value_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7dff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_snan1_value_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan1_value_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7dff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, 
s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xR7DFF) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_snan2_value_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfffffdff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_snan2_value_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan2_value_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfffffdff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xRFDFF) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_snan3_value_bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfffffc01 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_snan3_value_bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan3_value_bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfffffc01 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b16 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call bfloat @llvm.canonicalize.bf16(bfloat 0xRFC01) store bfloat %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: v_test_canonicalize_var_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: 
v_and_b32_e32 v0, 0x3ff, v0 -; GFX1250-NEXT: v_mov_b32_e32 v2, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_and_b32_e32 v1, 0xffff0000, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_lshlrev_b32 v0, 16, v0 -; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 -; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @v_test_canonicalize_var_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v1, 0xffff0000, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_lshlrev_b32 v0, 16, v0 +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX1250-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr <2 x bfloat>, ptr addrspace(1) %out, i32 %tid %val = load <2 x bfloat>, ptr addrspace(1) %gep @@ -419,27 +433,28 @@ define amdgpu_kernel void @v_test_canonicalize_var_v2bf16(ptr addrspace(1) %out) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: v_test_canonicalize_fabs_var_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; 
GFX1250-NEXT: v_mov_b32_e32 v2, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_lshrrev_b32_e32 v1, 16, v0 -; GFX1250-NEXT: v_and_b32_e32 v0, 0x7fff, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX1250-NEXT: v_and_b32_e32 v1, 0x7fff, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 -; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @v_test_canonicalize_fabs_var_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fabs_var_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x7fff, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: v_and_b32_e32 v1, 0x7fff, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX1250-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr <2 x bfloat>, ptr 
addrspace(1) %out, i32 %tid %val = load <2 x bfloat>, ptr addrspace(1) %gep @@ -448,27 +463,28 @@ define amdgpu_kernel void @v_test_canonicalize_fabs_var_v2bf16(ptr addrspace(1) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: v_test_canonicalize_fneg_fabs_var_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; GFX1250-NEXT: v_mov_b32_e32 v2, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_lshrrev_b32_e32 v1, 16, v0 -; GFX1250-NEXT: v_or_b32_e32 v0, 0x8000, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX1250-NEXT: v_or_b32_e32 v1, 0x8000, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 -; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fneg_fabs_var_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX1250-NEXT: v_or_b32_e32 v0, 0x8000, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: v_or_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu 
instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX1250-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr <2 x bfloat>, ptr addrspace(1) %out, i32 %tid %val = load <2 x bfloat>, ptr addrspace(1) %gep @@ -478,27 +494,28 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_fabs_var_v2bf16(ptr addrspac store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: v_test_canonicalize_fneg_var_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; GFX1250-NEXT: v_mov_b32_e32 v2, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_lshrrev_b32_e32 v1, 16, v0 -; GFX1250-NEXT: v_xor_b32_e32 v0, 0x8000, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX1250-NEXT: v_xor_b32_e32 v1, 0x8000, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 -; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @v_test_canonicalize_fneg_var_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: v_test_canonicalize_fneg_var_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: 
s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_load_b32 v0, v0, s[0:1] scale_offset +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_lshrrev_b32_e32 v1, 16, v0 +; GFX1250-NEXT: v_xor_b32_e32 v0, 0x8000, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: v_xor_b32_e32 v1, 0x8000, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX1250-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr <2 x bfloat>, ptr addrspace(1) %out, i32 %tid %val = load <2 x bfloat>, ptr addrspace(1) %gep @@ -507,781 +524,819 @@ define amdgpu_kernel void @v_test_canonicalize_fneg_var_v2bf16(ptr addrspace(1) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: s_test_canonicalize_var_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v2, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: s_and_b32 s3, s2, 0xffff0000 -; GFX1250-NEXT: s_lshl_b32 s2, s2, 16 -; GFX1250-NEXT: v_max_num_f32_e64 v0, s3, s3 -; GFX1250-NEXT: v_max_num_f32_e64 v1, s2, s2 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v1, v0 -; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @s_test_canonicalize_var_v2bf16(ptr addrspace(1) %out, i32 zeroext %val.arg) #1 { +; GFX1250-LABEL: s_test_canonicalize_var_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: s_and_b32 s3, s2, 0xffff0000 +; 
GFX1250-NEXT: s_lshl_b32 s2, s2, 16 +; GFX1250-NEXT: v_max_num_f32_e64 v0, s3, s3 +; GFX1250-NEXT: v_max_num_f32_e64 v1, s2, s2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v1, v0 +; GFX1250-NEXT: global_store_b32 v2, v0, s[0:1] +; GFX1250-NEXT: s_endpgm %val = bitcast i32 %val.arg to <2 x bfloat> %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %val) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_p0_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v0, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_p0_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_p0_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v0, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> zeroinitializer) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_n0_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x80008000 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_n0_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_n0_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x80008000 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x 
bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_p1_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3f803f80 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_p1_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_p1_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3f803f80 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_n1_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xbf80bf80 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_n1_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_n1_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xbf80bf80 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_literal_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x41804180 -; 
GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_literal_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_literal_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x41804180 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_no_denormals_fold_canonicalize_denormal0_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff03ff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal0_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_no_denormals_fold_canonicalize_denormal0_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff03ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal0_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff03ff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal0_v2bf16(ptr addrspace(1) %out) #3 { +; GFX1250-LABEL: 
test_denormals_fold_canonicalize_denormal0_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x3ff03ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_no_denormals_fold_canonicalize_denormal1_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x83ff83ff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_no_denormals_fold_canonicalize_denormal1_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_no_denormals_fold_canonicalize_denormal1_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x83ff83ff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal1_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x83ff83ff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_denormals_fold_canonicalize_denormal1_v2bf16(ptr addrspace(1) %out) #3 { +; GFX1250-LABEL: test_denormals_fold_canonicalize_denormal1_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x83ff83ff +; GFX1250-NEXT: 
s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_qnan_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c007c00 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_qnan_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c007c00 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg1_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc07fc0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg1_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg1_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc07fc0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> bitcast (i32 -1 to <2 x bfloat>)) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; 
GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg2_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc07fc0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_qnan_value_neg2_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_qnan_value_neg2_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7fc07fc0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_snan0_value_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c017c01 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_snan0_value_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan0_value_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7c017c01 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_snan1_value_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7dff7dff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: 
global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_snan1_value_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan1_value_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7dff7dff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_snan2_value_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfdfffdff -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_snan2_value_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan2_value_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfdfffdff +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: test_fold_canonicalize_snan3_value_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfc01fc01 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @test_fold_canonicalize_snan3_value_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: test_fold_canonicalize_snan3_value_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 
s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0xfc01fc01 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> ) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: v_test_canonicalize_var_v3bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_and_b32_e32 v2, 0xffff0000, v0 -; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v0, v0, v0 -; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v2 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <3 x bfloat> @v_test_canonicalize_var_v3bf16(<3 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v3bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v2, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v0, v0, v0 +; GFX1250-NEXT: v_max_num_f32_e32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v2 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %canonicalized = call <3 x bfloat> @llvm.canonicalize.v3bf16(<3 x bfloat> %val) ret <3 x bfloat> %canonicalized } -; GFX1250-LABEL: 
v_test_canonicalize_var_v4bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 -; GFX1250-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 -; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v3, v3, v3 -; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v2 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <4 x bfloat> @v_test_canonicalize_var_v4bf16(<4 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v4bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v2, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v3, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v3, v3, v3 +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v2 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %canonicalized = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> %val) ret <4 x bfloat> %canonicalized } -; GFX1250-LABEL: s_test_canonicalize_undef_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b32 v0, v0, 
s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @s_test_canonicalize_undef_v2bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: s_test_canonicalize_undef_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b32 v0, v0, s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> undef) store <2 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: v_test_canonicalize_reg_undef_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <2 x bfloat> @v_test_canonicalize_reg_undef_v2bf16(bfloat %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_reg_undef_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec = insertelement <2 x bfloat> poison, bfloat %val, i32 0 %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) ret <2 x bfloat> %canonicalized } -; GFX1250-LABEL: 
v_test_canonicalize_undef_reg_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_perm_b32 v0, v0, s0, 0x5040100 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <2 x bfloat> @v_test_canonicalize_undef_reg_v2bf16(bfloat %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_undef_reg_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_perm_b32 v0, v0, s0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec = insertelement <2 x bfloat> poison, bfloat %val, i32 1 %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) ret <2 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_undef_lo_imm_hi_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v0, 1.0 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <2 x bfloat> @v_test_canonicalize_undef_lo_imm_hi_v2bf16() #1 { +; GFX1250-LABEL: v_test_canonicalize_undef_lo_imm_hi_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, 1.0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec = insertelement <2 x bfloat> undef, bfloat 1.0, i32 
1 %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) ret <2 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_imm_lo_undef_hi_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0x3f80 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <2 x bfloat> @v_test_canonicalize_imm_lo_undef_hi_v2bf16() #1 { +; GFX1250-LABEL: v_test_canonicalize_imm_lo_undef_hi_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0x3f80 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec = insertelement <2 x bfloat> undef, bfloat 1.0, i32 0 %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) ret <2 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_undef_lo_k_hi_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0x41800000 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <2 x bfloat> @v_test_canonicalize_undef_lo_k_hi_v2bf16() #1 { +; GFX1250-LABEL: v_test_canonicalize_undef_lo_k_hi_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0x41800000 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec = insertelement <2 x bfloat> undef, bfloat 16.0, i32 1 %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) ret <2 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_k_lo_undef_hi_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0x4180 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <2 x bfloat> @v_test_canonicalize_k_lo_undef_hi_v2bf16() #1 { +; GFX1250-LABEL: v_test_canonicalize_k_lo_undef_hi_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 
0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0x4180 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec = insertelement <2 x bfloat> undef, bfloat 16.0, i32 0 %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec) ret <2 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_reg_k_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX1250-NEXT: s_movk_i32 s0, 0x4000 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <2 x bfloat> @v_test_canonicalize_reg_k_v2bf16(bfloat %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_reg_k_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x4000 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec0 = insertelement <2 x bfloat> poison, bfloat %val, i32 0 %vec1 = insertelement <2 x bfloat> %vec0, bfloat 2.0, i32 1 %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec1) ret <2 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_k_reg_v2bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) 
| instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX1250-NEXT: s_movk_i32 s0, 0x4000 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_perm_b32 v0, v0, s0, 0x5040100 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <2 x bfloat> @v_test_canonicalize_k_reg_v2bf16(bfloat %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_k_reg_v2bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x4000 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_perm_b32 v0, v0, s0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec0 = insertelement <2 x bfloat> poison, bfloat 2.0, i32 0 %vec1 = insertelement <2 x bfloat> %vec0, bfloat %val, i32 1 %canonicalized = call <2 x bfloat> @llvm.canonicalize.v2bf16(<2 x bfloat> %vec1) ret <2 x bfloat> %canonicalized } -; GFX1250-LABEL: s_test_canonicalize_undef_v4bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_mov_b32_e32 v0, 0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_mov_b32_e32 v1, v0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_store_b64 v0, v[0:1], s[0:1] -; GFX1250-NEXT: s_endpgm + define amdgpu_kernel void @s_test_canonicalize_undef_v4bf16(ptr addrspace(1) %out) #1 { +; GFX1250-LABEL: s_test_canonicalize_undef_v4bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-NEXT: v_mov_b32_e32 v0, 0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_mov_b32_e32 v1, v0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: global_store_b64 v0, v[0:1], 
s[0:1] +; GFX1250-NEXT: s_endpgm %canonicalized = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> undef) store <4 x bfloat> %canonicalized, ptr addrspace(1) %out ret void } -; GFX1250-LABEL: v_test_canonicalize_reg_undef_undef_undef_v4bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_mov_b32 v1, 0x7fc07fc0 :: v_dual_lshlrev_b32 v0, 16, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) -; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <4 x bfloat> @v_test_canonicalize_reg_undef_undef_undef_v4bf16(bfloat %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_reg_undef_undef_undef_v4bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v1, 0x7fc07fc0 :: v_dual_lshlrev_b32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_max_num_f32_e32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec = insertelement <4 x bfloat> poison, bfloat %val, i32 0 %canonicalized = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> %vec) ret <4 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_reg_reg_undef_undef_v4bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v0, v0, v0 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 -; GFX1250-NEXT: v_mov_b32_e32 v1, 0x7fc07fc0 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <4 x bfloat> @v_test_canonicalize_reg_reg_undef_undef_v4bf16(bfloat %val0, bfloat %val1) #1 { +; GFX1250-LABEL: v_test_canonicalize_reg_reg_undef_undef_v4bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v0, 16, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v0, v0, v0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 +; GFX1250-NEXT: v_mov_b32_e32 v1, 0x7fc07fc0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec0 = insertelement <4 x bfloat> poison, bfloat %val0, i32 0 %vec1 = insertelement <4 x bfloat> %vec0, bfloat %val1, i32 1 %canonicalized = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> %vec1) ret <4 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_reg_undef_reg_reg_v4bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v2, 16, v2 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v1, v1, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v2 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <4 x 
bfloat> @v_test_canonicalize_reg_undef_reg_reg_v4bf16(bfloat %val0, bfloat %val1, bfloat %val2) #1 { +; GFX1250-LABEL: v_test_canonicalize_reg_undef_reg_reg_v4bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v2, 16, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v1, v1, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX1250-NEXT: s_movk_i32 s0, 0x7fc0 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX1250-NEXT: v_perm_b32 v0, s0, v0, 0x5040100 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %vec0 = insertelement <4 x bfloat> poison, bfloat %val0, i32 0 %vec1 = insertelement <4 x bfloat> %vec0, bfloat %val1, i32 2 %vec2 = insertelement <4 x bfloat> %vec1, bfloat %val2, i32 3 %canonicalized = call <4 x bfloat> @llvm.canonicalize.v4bf16(<4 x bfloat> %vec2) ret <4 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_var_v6bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 -; GFX1250-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX1250-NEXT: v_and_b32_e32 v4, 0xffff0000, v1 -; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 -; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 -; GFX1250-NEXT: v_dual_max_num_f32 v5, v5, v5 :: v_dual_max_num_f32 v0, v0, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | 
instid1(VALU_DEP_2) -; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v5 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v4 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v3 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <6 x bfloat> @v_test_canonicalize_var_v6bf16(<6 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v6bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX1250-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX1250-NEXT: v_and_b32_e32 v4, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 +; GFX1250-NEXT: v_dual_max_num_f32 v5, v5, v5 :: v_dual_max_num_f32 v0, v0, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v4 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v3 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %canonicalized = call <6 x bfloat> @llvm.canonicalize.v6bf16(<6 x bfloat> %val) ret <6 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_var_v8bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 -; GFX1250-NEXT: v_and_b32_e32 v4, 0xffff0000, v3 -; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX1250-NEXT: v_and_b32_e32 v6, 
0xffff0000, v1 -; GFX1250-NEXT: v_and_b32_e32 v7, 0xffff0000, v0 -; GFX1250-NEXT: v_dual_max_num_f32 v5, v5, v5 :: v_dual_lshlrev_b32 v0, 16, v0 -; GFX1250-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v2, 16, v2 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX1250-NEXT: v_dual_max_num_f32 v4, v4, v4 :: v_dual_max_num_f32 v6, v6, v6 -; GFX1250-NEXT: v_dual_max_num_f32 v7, v7, v7 :: v_dual_max_num_f32 v0, v0, v0 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 -; GFX1250-NEXT: v_max_num_f32_e32 v3, v3, v3 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v7 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v6 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v5 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v4 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <8 x bfloat> @v_test_canonicalize_var_v8bf16(<8 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v8bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 +; GFX1250-NEXT: v_and_b32_e32 v4, 0xffff0000, v3 +; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX1250-NEXT: v_and_b32_e32 v6, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v7, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_max_num_f32 v5, v5, v5 :: v_dual_lshlrev_b32 v0, 16, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v1, 16, v1 :: v_dual_lshlrev_b32 v2, 16, v2 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_dual_max_num_f32 v4, v4, v4 :: v_dual_max_num_f32 v6, v6, v6 +; GFX1250-NEXT: v_dual_max_num_f32 v7, v7, v7 :: v_dual_max_num_f32 v0, v0, v0 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | 
instid1(VALU_DEP_3) +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 +; GFX1250-NEXT: v_max_num_f32_e32 v3, v3, v3 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v6 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v4 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %canonicalized = call <8 x bfloat> @llvm.canonicalize.v8bf16(<8 x bfloat> %val) ret <8 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_var_v12bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 -; GFX1250-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX1250-NEXT: v_and_b32_e32 v7, 0xffff0000, v4 -; GFX1250-NEXT: v_and_b32_e32 v8, 0xffff0000, v3 -; GFX1250-NEXT: v_and_b32_e32 v9, 0xffff0000, v2 -; GFX1250-NEXT: v_dual_lshlrev_b32 v4, 16, v4 :: v_dual_lshlrev_b32 v3, 16, v3 -; GFX1250-NEXT: v_and_b32_e32 v10, 0xffff0000, v1 -; GFX1250-NEXT: v_and_b32_e32 v11, 0xffff0000, v0 -; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX1250-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX1250-NEXT: v_dual_max_num_f32 v6, v6, v6 :: v_dual_max_num_f32 v5, v5, v5 -; GFX1250-NEXT: v_dual_max_num_f32 v7, v7, v7 :: v_dual_max_num_f32 v8, v8, v8 -; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_max_num_f32 v10, v10, v10 -; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v11, v11, v11 -; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 -; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v11 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v10 -; 
GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v9 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v8 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v7 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v6 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <12 x bfloat> @v_test_canonicalize_var_v12bf16(<12 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v12bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX1250-NEXT: v_and_b32_e32 v7, 0xffff0000, v4 +; GFX1250-NEXT: v_and_b32_e32 v8, 0xffff0000, v3 +; GFX1250-NEXT: v_and_b32_e32 v9, 0xffff0000, v2 +; GFX1250-NEXT: v_dual_lshlrev_b32 v4, 16, v4 :: v_dual_lshlrev_b32 v3, 16, v3 +; GFX1250-NEXT: v_and_b32_e32 v10, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v11, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX1250-NEXT: v_dual_max_num_f32 v6, v6, v6 :: v_dual_max_num_f32 v5, v5, v5 +; GFX1250-NEXT: v_dual_max_num_f32 v7, v7, v7 :: v_dual_max_num_f32 v8, v8, v8 +; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_max_num_f32 v10, v10, v10 +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v11, v11, v11 +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 +; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v11 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v10 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v9 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v8 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v7 +; 
GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v6 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %canonicalized = call <12 x bfloat> @llvm.canonicalize.v12bf16(<12 x bfloat> %val) ret <12 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_var_v16bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_and_b32_e32 v8, 0xffff0000, v7 -; GFX1250-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; GFX1250-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 -; GFX1250-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX1250-NEXT: v_dual_max_num_f32 v8, v8, v8 :: v_dual_lshlrev_b32 v5, 16, v5 -; GFX1250-NEXT: v_dual_max_num_f32 v7, v7, v7 :: v_dual_lshlrev_b32 v6, 16, v6 -; GFX1250-NEXT: v_and_b32_e32 v11, 0xffff0000, v4 -; GFX1250-NEXT: v_and_b32_e32 v12, 0xffff0000, v3 -; GFX1250-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 -; GFX1250-NEXT: v_dual_lshlrev_b32 v4, 16, v4 :: v_dual_lshlrev_b32 v3, 16, v3 -; GFX1250-NEXT: v_and_b32_e32 v14, 0xffff0000, v1 -; GFX1250-NEXT: v_and_b32_e32 v15, 0xffff0000, v0 -; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_lshlrev_b32 v2, 16, v2 -; GFX1250-NEXT: v_max_num_f32_e32 v6, v6, v6 -; GFX1250-NEXT: v_dual_max_num_f32 v10, v10, v10 :: v_dual_max_num_f32 v5, v5, v5 -; GFX1250-NEXT: v_dual_max_num_f32 v11, v11, v11 :: v_dual_max_num_f32 v12, v12, v12 -; GFX1250-NEXT: v_dual_max_num_f32 v13, v13, v13 :: v_dual_max_num_f32 v14, v14, v14 -; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v15, v15, v15 -; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 -; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v15 -; GFX1250-NEXT: 
v_cvt_pk_bf16_f32 v1, v1, v14 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v13 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v12 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v11 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v10 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v6, v6, v9 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v7, v7, v8 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <16 x bfloat> @v_test_canonicalize_var_v16bf16(<16 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v16bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v8, 0xffff0000, v7 +; GFX1250-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX1250-NEXT: v_and_b32_e32 v9, 0xffff0000, v6 +; GFX1250-NEXT: v_and_b32_e32 v10, 0xffff0000, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX1250-NEXT: v_dual_max_num_f32 v8, v8, v8 :: v_dual_lshlrev_b32 v5, 16, v5 +; GFX1250-NEXT: v_dual_max_num_f32 v7, v7, v7 :: v_dual_lshlrev_b32 v6, 16, v6 +; GFX1250-NEXT: v_and_b32_e32 v11, 0xffff0000, v4 +; GFX1250-NEXT: v_and_b32_e32 v12, 0xffff0000, v3 +; GFX1250-NEXT: v_and_b32_e32 v13, 0xffff0000, v2 +; GFX1250-NEXT: v_dual_lshlrev_b32 v4, 16, v4 :: v_dual_lshlrev_b32 v3, 16, v3 +; GFX1250-NEXT: v_and_b32_e32 v14, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v15, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_lshlrev_b32 v2, 16, v2 +; GFX1250-NEXT: v_max_num_f32_e32 v6, v6, v6 +; GFX1250-NEXT: v_dual_max_num_f32 v10, v10, v10 :: v_dual_max_num_f32 v5, v5, v5 +; GFX1250-NEXT: v_dual_max_num_f32 v11, v11, v11 :: v_dual_max_num_f32 v12, v12, v12 +; GFX1250-NEXT: v_dual_max_num_f32 v13, v13, v13 :: v_dual_max_num_f32 v14, v14, v14 +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v15, v15, v15 +; 
GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 +; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v15 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v14 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v13 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v12 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v11 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v10 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v6, v6, v9 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v7, v7, v8 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %canonicalized = call <16 x bfloat> @llvm.canonicalize.v16bf16(<16 x bfloat> %val) ret <16 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_var_v32bf16: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: v_and_b32_e32 v16, 0xffff0000, v15 -; GFX1250-NEXT: v_and_b32_e32 v18, 0xffff0000, v13 -; GFX1250-NEXT: v_and_b32_e32 v20, 0xffff0000, v11 -; GFX1250-NEXT: v_and_b32_e32 v22, 0xffff0000, v9 -; GFX1250-NEXT: v_and_b32_e32 v24, 0xffff0000, v7 -; GFX1250-NEXT: v_dual_max_num_f32 v16, v16, v16 :: v_dual_lshlrev_b32 v15, 16, v15 -; GFX1250-NEXT: v_and_b32_e32 v17, 0xffff0000, v14 -; GFX1250-NEXT: v_dual_lshlrev_b32 v14, 16, v14 :: v_dual_lshlrev_b32 v13, 16, v13 -; GFX1250-NEXT: v_max_num_f32_e32 v18, v18, v18 -; GFX1250-NEXT: v_and_b32_e32 v19, 0xffff0000, v12 -; GFX1250-NEXT: v_dual_lshlrev_b32 v12, 16, v12 :: v_dual_lshlrev_b32 v11, 16, v11 -; GFX1250-NEXT: v_max_num_f32_e32 v20, v20, v20 -; GFX1250-NEXT: v_and_b32_e32 v21, 0xffff0000, v10 -; GFX1250-NEXT: v_dual_lshlrev_b32 v10, 16, v10 :: v_dual_lshlrev_b32 v9, 16, v9 -; GFX1250-NEXT: v_max_num_f32_e32 v22, v22, v22 -; GFX1250-NEXT: v_and_b32_e32 v23, 0xffff0000, v8 -; GFX1250-NEXT: v_dual_lshlrev_b32 v8, 
16, v8 :: v_dual_lshlrev_b32 v7, 16, v7 -; GFX1250-NEXT: v_max_num_f32_e32 v24, v24, v24 -; GFX1250-NEXT: v_and_b32_e32 v25, 0xffff0000, v6 -; GFX1250-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX1250-NEXT: v_and_b32_e32 v26, 0xffff0000, v5 -; GFX1250-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX1250-NEXT: v_and_b32_e32 v27, 0xffff0000, v4 -; GFX1250-NEXT: v_and_b32_e32 v28, 0xffff0000, v3 -; GFX1250-NEXT: v_and_b32_e32 v29, 0xffff0000, v2 -; GFX1250-NEXT: v_dual_lshlrev_b32 v4, 16, v4 :: v_dual_lshlrev_b32 v3, 16, v3 -; GFX1250-NEXT: v_and_b32_e32 v30, 0xffff0000, v1 -; GFX1250-NEXT: v_and_b32_e32 v31, 0xffff0000, v0 -; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 -; GFX1250-NEXT: v_dual_max_num_f32 v15, v15, v15 :: v_dual_lshlrev_b32 v2, 16, v2 -; GFX1250-NEXT: v_dual_max_num_f32 v17, v17, v17 :: v_dual_max_num_f32 v14, v14, v14 -; GFX1250-NEXT: v_dual_max_num_f32 v13, v13, v13 :: v_dual_max_num_f32 v19, v19, v19 -; GFX1250-NEXT: v_dual_max_num_f32 v12, v12, v12 :: v_dual_max_num_f32 v11, v11, v11 -; GFX1250-NEXT: v_dual_max_num_f32 v21, v21, v21 :: v_dual_max_num_f32 v10, v10, v10 -; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_max_num_f32 v23, v23, v23 -; GFX1250-NEXT: v_dual_max_num_f32 v8, v8, v8 :: v_dual_max_num_f32 v7, v7, v7 -; GFX1250-NEXT: v_dual_max_num_f32 v25, v25, v25 :: v_dual_max_num_f32 v6, v6, v6 -; GFX1250-NEXT: v_dual_max_num_f32 v26, v26, v26 :: v_dual_max_num_f32 v5, v5, v5 -; GFX1250-NEXT: v_dual_max_num_f32 v27, v27, v27 :: v_dual_max_num_f32 v28, v28, v28 -; GFX1250-NEXT: v_dual_max_num_f32 v29, v29, v29 :: v_dual_max_num_f32 v30, v30, v30 -; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v31, v31, v31 -; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 -; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, 
v31 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v30 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v29 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v28 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v27 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v26 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v6, v6, v25 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v7, v7, v24 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v8, v8, v23 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v9, v9, v22 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v10, v10, v21 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v11, v11, v20 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v12, v12, v19 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v13, v13, v18 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v14, v14, v17 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v15, v15, v16 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <32 x bfloat> @v_test_canonicalize_var_v32bf16(<32 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v32bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v16, 0xffff0000, v15 +; GFX1250-NEXT: v_and_b32_e32 v18, 0xffff0000, v13 +; GFX1250-NEXT: v_and_b32_e32 v20, 0xffff0000, v11 +; GFX1250-NEXT: v_and_b32_e32 v22, 0xffff0000, v9 +; GFX1250-NEXT: v_and_b32_e32 v24, 0xffff0000, v7 +; GFX1250-NEXT: v_dual_max_num_f32 v16, v16, v16 :: v_dual_lshlrev_b32 v15, 16, v15 +; GFX1250-NEXT: v_and_b32_e32 v17, 0xffff0000, v14 +; GFX1250-NEXT: v_dual_lshlrev_b32 v14, 16, v14 :: v_dual_lshlrev_b32 v13, 16, v13 +; GFX1250-NEXT: v_max_num_f32_e32 v18, v18, v18 +; GFX1250-NEXT: v_and_b32_e32 v19, 0xffff0000, v12 +; GFX1250-NEXT: v_dual_lshlrev_b32 v12, 16, v12 :: v_dual_lshlrev_b32 v11, 16, v11 +; GFX1250-NEXT: v_max_num_f32_e32 v20, v20, v20 +; GFX1250-NEXT: v_and_b32_e32 v21, 0xffff0000, v10 +; GFX1250-NEXT: v_dual_lshlrev_b32 v10, 16, v10 :: v_dual_lshlrev_b32 v9, 16, v9 +; GFX1250-NEXT: v_max_num_f32_e32 v22, v22, v22 +; GFX1250-NEXT: 
v_and_b32_e32 v23, 0xffff0000, v8 +; GFX1250-NEXT: v_dual_lshlrev_b32 v8, 16, v8 :: v_dual_lshlrev_b32 v7, 16, v7 +; GFX1250-NEXT: v_max_num_f32_e32 v24, v24, v24 +; GFX1250-NEXT: v_and_b32_e32 v25, 0xffff0000, v6 +; GFX1250-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX1250-NEXT: v_and_b32_e32 v26, 0xffff0000, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX1250-NEXT: v_and_b32_e32 v27, 0xffff0000, v4 +; GFX1250-NEXT: v_and_b32_e32 v28, 0xffff0000, v3 +; GFX1250-NEXT: v_and_b32_e32 v29, 0xffff0000, v2 +; GFX1250-NEXT: v_dual_lshlrev_b32 v4, 16, v4 :: v_dual_lshlrev_b32 v3, 16, v3 +; GFX1250-NEXT: v_and_b32_e32 v30, 0xffff0000, v1 +; GFX1250-NEXT: v_and_b32_e32 v31, 0xffff0000, v0 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v1, 16, v1 +; GFX1250-NEXT: v_dual_max_num_f32 v15, v15, v15 :: v_dual_lshlrev_b32 v2, 16, v2 +; GFX1250-NEXT: v_dual_max_num_f32 v17, v17, v17 :: v_dual_max_num_f32 v14, v14, v14 +; GFX1250-NEXT: v_dual_max_num_f32 v13, v13, v13 :: v_dual_max_num_f32 v19, v19, v19 +; GFX1250-NEXT: v_dual_max_num_f32 v12, v12, v12 :: v_dual_max_num_f32 v11, v11, v11 +; GFX1250-NEXT: v_dual_max_num_f32 v21, v21, v21 :: v_dual_max_num_f32 v10, v10, v10 +; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_max_num_f32 v23, v23, v23 +; GFX1250-NEXT: v_dual_max_num_f32 v8, v8, v8 :: v_dual_max_num_f32 v7, v7, v7 +; GFX1250-NEXT: v_dual_max_num_f32 v25, v25, v25 :: v_dual_max_num_f32 v6, v6, v6 +; GFX1250-NEXT: v_dual_max_num_f32 v26, v26, v26 :: v_dual_max_num_f32 v5, v5, v5 +; GFX1250-NEXT: v_dual_max_num_f32 v27, v27, v27 :: v_dual_max_num_f32 v28, v28, v28 +; GFX1250-NEXT: v_dual_max_num_f32 v29, v29, v29 :: v_dual_max_num_f32 v30, v30, v30 +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v31, v31, v31 +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v2, v2, v2 +; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | 
instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v31 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v30 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v29 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v28 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v27 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v26 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v6, v6, v25 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v7, v7, v24 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v8, v8, v23 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v9, v9, v22 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v10, v10, v21 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v11, v11, v20 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v12, v12, v19 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v13, v13, v18 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v14, v14, v17 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v15, v15, v16 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %canonicalized = call <32 x bfloat> @llvm.canonicalize.v32bf16(<32 x bfloat> %val) ret <32 x bfloat> %canonicalized } -; GFX1250-LABEL: v_test_canonicalize_var_v64bf16: -; GFX1250: %bb.0: -; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: scratch_load_b32 v31, off, s32 -; GFX1250-NEXT: v_and_b32_e32 v81, 0xffff0000, v0 -; GFX1250-NEXT: v_and_b32_e32 v38, 0xffff0000, v24 -; GFX1250-NEXT: v_lshlrev_b32_e32 v24, 16, v24 -; GFX1250-NEXT: v_and_b32_e32 v39, 0xffff0000, v23 -; GFX1250-NEXT: v_lshlrev_b32_e32 v23, 16, v23 -; GFX1250-NEXT: v_and_b32_e32 v80, 0xffff0000, v6 -; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v6, 16, v6 -; GFX1250-NEXT: v_and_b32_e32 v82, 0xffff0000, v1 -; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX1250-NEXT: v_max_num_f32_e32 v81, v81, v81 -; GFX1250-NEXT: v_and_b32_e32 v83, 0xffff0000, v2 -; GFX1250-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX1250-NEXT: v_and_b32_e32 v34, 0xffff0000, v28 -; GFX1250-NEXT: v_lshlrev_b32_e32 v28, 16, v28 -; GFX1250-NEXT: v_and_b32_e32 
v35, 0xffff0000, v27 -; GFX1250-NEXT: v_lshlrev_b32_e32 v27, 16, v27 -; GFX1250-NEXT: v_and_b32_e32 v36, 0xffff0000, v26 -; GFX1250-NEXT: v_lshlrev_b32_e32 v26, 16, v26 -; GFX1250-NEXT: v_and_b32_e32 v48, 0xffff0000, v22 -; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v82, v82, v82 -; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v83, v83, v83 -; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v24, v24, v24 -; GFX1250-NEXT: v_max_num_f32_e32 v39, v39, v39 -; GFX1250-NEXT: v_dual_max_num_f32 v23, v23, v23 :: v_dual_max_num_f32 v48, v48, v48 -; GFX1250-NEXT: v_and_b32_e32 v32, 0xffff0000, v30 -; GFX1250-NEXT: v_lshlrev_b32_e32 v30, 16, v30 -; GFX1250-NEXT: v_and_b32_e32 v33, 0xffff0000, v29 -; GFX1250-NEXT: v_lshlrev_b32_e32 v29, 16, v29 -; GFX1250-NEXT: v_and_b32_e32 v37, 0xffff0000, v25 -; GFX1250-NEXT: v_dual_lshlrev_b32 v25, 16, v25 :: v_dual_lshlrev_b32 v22, 16, v22 -; GFX1250-NEXT: v_and_b32_e32 v49, 0xffff0000, v21 -; GFX1250-NEXT: v_lshlrev_b32_e32 v21, 16, v21 -; GFX1250-NEXT: v_and_b32_e32 v50, 0xffff0000, v20 -; GFX1250-NEXT: v_lshlrev_b32_e32 v20, 16, v20 -; GFX1250-NEXT: v_and_b32_e32 v51, 0xffff0000, v19 -; GFX1250-NEXT: v_lshlrev_b32_e32 v19, 16, v19 -; GFX1250-NEXT: v_and_b32_e32 v52, 0xffff0000, v18 -; GFX1250-NEXT: v_lshlrev_b32_e32 v18, 16, v18 -; GFX1250-NEXT: v_and_b32_e32 v53, 0xffff0000, v17 -; GFX1250-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; GFX1250-NEXT: v_and_b32_e32 v54, 0xffff0000, v16 -; GFX1250-NEXT: v_lshlrev_b32_e32 v16, 16, v16 -; GFX1250-NEXT: v_and_b32_e32 v55, 0xffff0000, v15 -; GFX1250-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; GFX1250-NEXT: v_and_b32_e32 v64, 0xffff0000, v14 -; GFX1250-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; GFX1250-NEXT: v_and_b32_e32 v65, 0xffff0000, v13 -; GFX1250-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; GFX1250-NEXT: v_and_b32_e32 v66, 0xffff0000, v12 -; GFX1250-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; GFX1250-NEXT: v_and_b32_e32 v67, 0xffff0000, v11 -; 
GFX1250-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; GFX1250-NEXT: v_and_b32_e32 v68, 0xffff0000, v10 -; GFX1250-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; GFX1250-NEXT: v_and_b32_e32 v69, 0xffff0000, v9 -; GFX1250-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; GFX1250-NEXT: v_and_b32_e32 v70, 0xffff0000, v8 -; GFX1250-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; GFX1250-NEXT: v_and_b32_e32 v71, 0xffff0000, v7 -; GFX1250-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v81 -; GFX1250-NEXT: v_and_b32_e32 v81, 0xffff0000, v5 -; GFX1250-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v82 -; GFX1250-NEXT: v_and_b32_e32 v82, 0xffff0000, v4 -; GFX1250-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v83 -; GFX1250-NEXT: v_and_b32_e32 v83, 0xffff0000, v3 -; GFX1250-NEXT: v_dual_max_num_f32 v32, v32, v32 :: v_dual_lshlrev_b32 v3, 16, v3 -; GFX1250-NEXT: v_dual_max_num_f32 v27, v27, v27 :: v_dual_max_num_f32 v36, v36, v36 -; GFX1250-NEXT: v_dual_max_num_f32 v26, v26, v26 :: v_dual_max_num_f32 v37, v37, v37 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v23, v23, v39 -; GFX1250-NEXT: v_dual_max_num_f32 v30, v30, v30 :: v_dual_max_num_f32 v33, v33, v33 -; GFX1250-NEXT: v_dual_max_num_f32 v29, v29, v29 :: v_dual_max_num_f32 v34, v34, v34 -; GFX1250-NEXT: v_dual_max_num_f32 v28, v28, v28 :: v_dual_max_num_f32 v35, v35, v35 -; GFX1250-NEXT: v_dual_max_num_f32 v25, v25, v25 :: v_dual_max_num_f32 v38, v38, v38 -; GFX1250-NEXT: v_dual_max_num_f32 v22, v22, v22 :: v_dual_max_num_f32 v49, v49, v49 -; GFX1250-NEXT: v_dual_max_num_f32 v21, v21, v21 :: v_dual_max_num_f32 v50, v50, v50 -; GFX1250-NEXT: v_dual_max_num_f32 v20, v20, v20 :: v_dual_max_num_f32 v51, v51, v51 -; GFX1250-NEXT: v_dual_max_num_f32 v19, v19, v19 :: v_dual_max_num_f32 v52, v52, v52 -; GFX1250-NEXT: v_dual_max_num_f32 v18, v18, v18 :: v_dual_max_num_f32 v53, v53, v53 -; GFX1250-NEXT: v_dual_max_num_f32 v17, v17, v17 :: v_dual_max_num_f32 v54, v54, v54 -; 
GFX1250-NEXT: v_dual_max_num_f32 v16, v16, v16 :: v_dual_max_num_f32 v55, v55, v55 -; GFX1250-NEXT: v_dual_max_num_f32 v15, v15, v15 :: v_dual_max_num_f32 v64, v64, v64 -; GFX1250-NEXT: v_dual_max_num_f32 v14, v14, v14 :: v_dual_max_num_f32 v65, v65, v65 -; GFX1250-NEXT: v_dual_max_num_f32 v13, v13, v13 :: v_dual_max_num_f32 v66, v66, v66 -; GFX1250-NEXT: v_dual_max_num_f32 v12, v12, v12 :: v_dual_max_num_f32 v67, v67, v67 -; GFX1250-NEXT: v_dual_max_num_f32 v11, v11, v11 :: v_dual_max_num_f32 v68, v68, v68 -; GFX1250-NEXT: v_dual_max_num_f32 v10, v10, v10 :: v_dual_max_num_f32 v69, v69, v69 -; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_max_num_f32 v70, v70, v70 -; GFX1250-NEXT: v_dual_max_num_f32 v8, v8, v8 :: v_dual_max_num_f32 v71, v71, v71 -; GFX1250-NEXT: v_dual_max_num_f32 v80, v80, v80 :: v_dual_max_num_f32 v81, v81, v81 -; GFX1250-NEXT: v_dual_max_num_f32 v82, v82, v82 :: v_dual_max_num_f32 v83, v83, v83 -; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 -; GFX1250-NEXT: v_dual_max_num_f32 v5, v5, v5 :: v_dual_max_num_f32 v6, v6, v6 -; GFX1250-NEXT: v_max_num_f32_e32 v7, v7, v7 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v26, v26, v36 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v83 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v82 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v81 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v6, v6, v80 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v7, v7, v71 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v8, v8, v70 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v9, v9, v69 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v10, v10, v68 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v11, v11, v67 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v12, v12, v66 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v13, v13, v65 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v14, v14, v64 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v15, v15, v55 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v16, v16, v54 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v17, v17, v53 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 
v18, v18, v52 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v19, v19, v51 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v20, v20, v50 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v21, v21, v49 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v22, v22, v48 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v24, v24, v38 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v25, v25, v37 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v27, v27, v35 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v28, v28, v34 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v29, v29, v33 -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v30, v30, v32 -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_and_b32_e32 v39, 0xffff0000, v31 -; GFX1250-NEXT: v_lshlrev_b32_e32 v31, 16, v31 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX1250-NEXT: v_max_num_f32_e32 v36, v39, v39 -; GFX1250-NEXT: v_max_num_f32_e32 v31, v31, v31 -; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-NEXT: v_cvt_pk_bf16_f32 v31, v31, v36 -; GFX1250-NEXT: s_set_pc_i64 s[30:31] + define <64 x bfloat> @v_test_canonicalize_var_v64bf16(<64 x bfloat> %val) #1 { +; GFX1250-LABEL: v_test_canonicalize_var_v64bf16: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: scratch_load_b32 v31, off, s32 +; GFX1250-NEXT: v_and_b32_e32 v81, 0xffff0000, v0 +; GFX1250-NEXT: v_and_b32_e32 v38, 0xffff0000, v24 +; GFX1250-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX1250-NEXT: v_and_b32_e32 v39, 0xffff0000, v23 +; GFX1250-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX1250-NEXT: v_and_b32_e32 v80, 0xffff0000, v6 +; GFX1250-NEXT: v_dual_lshlrev_b32 v0, 16, v0 :: v_dual_lshlrev_b32 v6, 16, v6 +; GFX1250-NEXT: v_and_b32_e32 v82, 0xffff0000, v1 +; GFX1250-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX1250-NEXT: v_max_num_f32_e32 v81, v81, v81 +; GFX1250-NEXT: v_and_b32_e32 v83, 0xffff0000, v2 +; GFX1250-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX1250-NEXT: v_and_b32_e32 v34, 0xffff0000, v28 +; GFX1250-NEXT: v_lshlrev_b32_e32 v28, 16, v28 +; GFX1250-NEXT: v_and_b32_e32 
v35, 0xffff0000, v27 +; GFX1250-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; GFX1250-NEXT: v_and_b32_e32 v36, 0xffff0000, v26 +; GFX1250-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; GFX1250-NEXT: v_and_b32_e32 v48, 0xffff0000, v22 +; GFX1250-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v82, v82, v82 +; GFX1250-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v83, v83, v83 +; GFX1250-NEXT: v_dual_max_num_f32 v2, v2, v2 :: v_dual_max_num_f32 v24, v24, v24 +; GFX1250-NEXT: v_max_num_f32_e32 v39, v39, v39 +; GFX1250-NEXT: v_dual_max_num_f32 v23, v23, v23 :: v_dual_max_num_f32 v48, v48, v48 +; GFX1250-NEXT: v_and_b32_e32 v32, 0xffff0000, v30 +; GFX1250-NEXT: v_lshlrev_b32_e32 v30, 16, v30 +; GFX1250-NEXT: v_and_b32_e32 v33, 0xffff0000, v29 +; GFX1250-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; GFX1250-NEXT: v_and_b32_e32 v37, 0xffff0000, v25 +; GFX1250-NEXT: v_dual_lshlrev_b32 v25, 16, v25 :: v_dual_lshlrev_b32 v22, 16, v22 +; GFX1250-NEXT: v_and_b32_e32 v49, 0xffff0000, v21 +; GFX1250-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX1250-NEXT: v_and_b32_e32 v50, 0xffff0000, v20 +; GFX1250-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX1250-NEXT: v_and_b32_e32 v51, 0xffff0000, v19 +; GFX1250-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX1250-NEXT: v_and_b32_e32 v52, 0xffff0000, v18 +; GFX1250-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX1250-NEXT: v_and_b32_e32 v53, 0xffff0000, v17 +; GFX1250-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX1250-NEXT: v_and_b32_e32 v54, 0xffff0000, v16 +; GFX1250-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX1250-NEXT: v_and_b32_e32 v55, 0xffff0000, v15 +; GFX1250-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX1250-NEXT: v_and_b32_e32 v64, 0xffff0000, v14 +; GFX1250-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX1250-NEXT: v_and_b32_e32 v65, 0xffff0000, v13 +; GFX1250-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX1250-NEXT: v_and_b32_e32 v66, 0xffff0000, v12 +; GFX1250-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX1250-NEXT: v_and_b32_e32 v67, 0xffff0000, v11 +; 
GFX1250-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX1250-NEXT: v_and_b32_e32 v68, 0xffff0000, v10 +; GFX1250-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX1250-NEXT: v_and_b32_e32 v69, 0xffff0000, v9 +; GFX1250-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX1250-NEXT: v_and_b32_e32 v70, 0xffff0000, v8 +; GFX1250-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX1250-NEXT: v_and_b32_e32 v71, 0xffff0000, v7 +; GFX1250-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v81 +; GFX1250-NEXT: v_and_b32_e32 v81, 0xffff0000, v5 +; GFX1250-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v1, v82 +; GFX1250-NEXT: v_and_b32_e32 v82, 0xffff0000, v4 +; GFX1250-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v2, v2, v83 +; GFX1250-NEXT: v_and_b32_e32 v83, 0xffff0000, v3 +; GFX1250-NEXT: v_dual_max_num_f32 v32, v32, v32 :: v_dual_lshlrev_b32 v3, 16, v3 +; GFX1250-NEXT: v_dual_max_num_f32 v27, v27, v27 :: v_dual_max_num_f32 v36, v36, v36 +; GFX1250-NEXT: v_dual_max_num_f32 v26, v26, v26 :: v_dual_max_num_f32 v37, v37, v37 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v23, v23, v39 +; GFX1250-NEXT: v_dual_max_num_f32 v30, v30, v30 :: v_dual_max_num_f32 v33, v33, v33 +; GFX1250-NEXT: v_dual_max_num_f32 v29, v29, v29 :: v_dual_max_num_f32 v34, v34, v34 +; GFX1250-NEXT: v_dual_max_num_f32 v28, v28, v28 :: v_dual_max_num_f32 v35, v35, v35 +; GFX1250-NEXT: v_dual_max_num_f32 v25, v25, v25 :: v_dual_max_num_f32 v38, v38, v38 +; GFX1250-NEXT: v_dual_max_num_f32 v22, v22, v22 :: v_dual_max_num_f32 v49, v49, v49 +; GFX1250-NEXT: v_dual_max_num_f32 v21, v21, v21 :: v_dual_max_num_f32 v50, v50, v50 +; GFX1250-NEXT: v_dual_max_num_f32 v20, v20, v20 :: v_dual_max_num_f32 v51, v51, v51 +; GFX1250-NEXT: v_dual_max_num_f32 v19, v19, v19 :: v_dual_max_num_f32 v52, v52, v52 +; GFX1250-NEXT: v_dual_max_num_f32 v18, v18, v18 :: v_dual_max_num_f32 v53, v53, v53 +; GFX1250-NEXT: v_dual_max_num_f32 v17, v17, v17 :: v_dual_max_num_f32 v54, v54, v54 +; 
GFX1250-NEXT: v_dual_max_num_f32 v16, v16, v16 :: v_dual_max_num_f32 v55, v55, v55 +; GFX1250-NEXT: v_dual_max_num_f32 v15, v15, v15 :: v_dual_max_num_f32 v64, v64, v64 +; GFX1250-NEXT: v_dual_max_num_f32 v14, v14, v14 :: v_dual_max_num_f32 v65, v65, v65 +; GFX1250-NEXT: v_dual_max_num_f32 v13, v13, v13 :: v_dual_max_num_f32 v66, v66, v66 +; GFX1250-NEXT: v_dual_max_num_f32 v12, v12, v12 :: v_dual_max_num_f32 v67, v67, v67 +; GFX1250-NEXT: v_dual_max_num_f32 v11, v11, v11 :: v_dual_max_num_f32 v68, v68, v68 +; GFX1250-NEXT: v_dual_max_num_f32 v10, v10, v10 :: v_dual_max_num_f32 v69, v69, v69 +; GFX1250-NEXT: v_dual_max_num_f32 v9, v9, v9 :: v_dual_max_num_f32 v70, v70, v70 +; GFX1250-NEXT: v_dual_max_num_f32 v8, v8, v8 :: v_dual_max_num_f32 v71, v71, v71 +; GFX1250-NEXT: v_dual_max_num_f32 v80, v80, v80 :: v_dual_max_num_f32 v81, v81, v81 +; GFX1250-NEXT: v_dual_max_num_f32 v82, v82, v82 :: v_dual_max_num_f32 v83, v83, v83 +; GFX1250-NEXT: v_dual_max_num_f32 v3, v3, v3 :: v_dual_max_num_f32 v4, v4, v4 +; GFX1250-NEXT: v_dual_max_num_f32 v5, v5, v5 :: v_dual_max_num_f32 v6, v6, v6 +; GFX1250-NEXT: v_max_num_f32_e32 v7, v7, v7 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v26, v26, v36 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v3, v3, v83 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v4, v4, v82 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v5, v5, v81 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v6, v6, v80 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v7, v7, v71 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v8, v8, v70 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v9, v9, v69 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v10, v10, v68 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v11, v11, v67 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v12, v12, v66 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v13, v13, v65 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v14, v14, v64 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v15, v15, v55 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v16, v16, v54 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v17, v17, v53 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 
v18, v18, v52 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v19, v19, v51 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v20, v20, v50 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v21, v21, v49 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v22, v22, v48 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v24, v24, v38 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v25, v25, v37 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v27, v27, v35 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v28, v28, v34 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v29, v29, v33 +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v30, v30, v32 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: v_and_b32_e32 v39, 0xffff0000, v31 +; GFX1250-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_max_num_f32_e32 v36, v39, v39 +; GFX1250-NEXT: v_max_num_f32_e32 v31, v31, v31 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_cvt_pk_bf16_f32 v31, v31, v36 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %canonicalized = call <64 x bfloat> @llvm.canonicalize.v64bf16(<64 x bfloat> %val) ret <64 x bfloat> %canonicalized } diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll index 6a898fa799f3e..30bcdf97e26fd 100644 --- a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll @@ -231,22 +231,13 @@ define bfloat @v_copysign_bf16_f32(bfloat %mag, float %sign.f32) { ; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX11TRUE16-LABEL: v_copysign_bf16_f32: -; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v1 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h -; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11FAKE16-LABEL: v_copysign_bf16_f32: -; GFX11FAKE16: ; %bb.0: -; GFX11FAKE16-NEXT: 
s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 -; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: v_copysign_bf16_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] %sign = fptrunc float %sign.f32 to bfloat %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign) ret bfloat %op @@ -298,22 +289,13 @@ define bfloat @v_copysign_bf16_f64(bfloat %mag, double %sign.f64) { ; GFX10-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; -; GFX11TRUE16-LABEL: v_copysign_bf16_f64: -; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v2 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h -; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11FAKE16-LABEL: v_copysign_bf16_f64: -; GFX11FAKE16: ; %bb.0: -; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 -; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: v_copysign_bf16_f64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] %sign = fptrunc double %sign.f64 to bfloat %op = call bfloat @llvm.copysign.bf16(bfloat %mag, bfloat %sign) ret bfloat %op @@ -499,9 +481,10 @@ define amdgpu_ps i32 
@s_copysign_bf16_f32(bfloat inreg %mag, float inreg %sign.f ; ; GFX11TRUE16-LABEL: s_copysign_bf16_f32: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s1 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s1 +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 @@ -575,9 +558,10 @@ define amdgpu_ps i32 @s_copysign_bf16_f64(bfloat inreg %mag, double inreg %sign. ; ; GFX11TRUE16-LABEL: s_copysign_bf16_f64: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s2 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s2 +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 @@ -1153,12 +1137,12 @@ define amdgpu_ps i32 @s_copysign_v2bf16(<2 x bfloat> inreg %arg_mag, <2 x bfloat define amdgpu_ps <3 x i16> @s_copysign_v3bf16(<3 x bfloat> inreg %arg_mag, <3 x bfloat> inreg %arg_sign) { ; GCN-LABEL: s_copysign_v3bf16: ; GCN: ; %bb.0: -; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s5 +; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s3 ; GCN-NEXT: v_mul_f32_e64 v1, 1.0, s4 -; GCN-NEXT: v_mul_f32_e64 v2, 1.0, s3 -; GCN-NEXT: v_mul_f32_e64 v3, 1.0, s2 +; GCN-NEXT: v_mul_f32_e64 v2, 1.0, s5 +; GCN-NEXT: v_mul_f32_e64 v3, 1.0, s0 ; GCN-NEXT: v_mul_f32_e64 v4, 1.0, s1 -; GCN-NEXT: v_mul_f32_e64 v5, 1.0, s0 +; GCN-NEXT: v_mul_f32_e64 v5, 1.0, s2 ; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v1 ; 
GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2 @@ -1166,45 +1150,45 @@ define amdgpu_ps <3 x i16> @s_copysign_v3bf16(<3 x bfloat> inreg %arg_mag, <3 x ; GCN-NEXT: v_bfe_u32 v4, v4, 16, 15 ; GCN-NEXT: v_bfe_u32 v3, v3, 16, 15 ; GCN-NEXT: v_and_b32_e32 v2, 0x8000, v2 -; GCN-NEXT: v_and_b32_e32 v1, 0x8000, v1 +; GCN-NEXT: v_and_b32_e32 v6, 0x8000, v1 ; GCN-NEXT: v_and_b32_e32 v0, 0x8000, v0 -; GCN-NEXT: v_or_b32_e32 v2, v5, v2 -; GCN-NEXT: v_or_b32_e32 v1, v4, v1 -; GCN-NEXT: v_or_b32_e32 v0, v3, v0 -; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GCN-NEXT: v_or_b32_e32 v2, v2, v1 -; GCN-NEXT: v_alignbit_b32 v1, v0, v1, 16 -; GCN-NEXT: v_readfirstlane_b32 s1, v1 -; GCN-NEXT: v_readfirstlane_b32 s0, v2 -; GCN-NEXT: v_readfirstlane_b32 s2, v0 +; GCN-NEXT: v_or_b32_e32 v1, v5, v2 +; GCN-NEXT: v_or_b32_e32 v2, v4, v6 +; GCN-NEXT: v_or_b32_e32 v3, v3, v0 +; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v2 +; GCN-NEXT: v_or_b32_e32 v4, v3, v0 +; GCN-NEXT: v_lshr_b64 v[2:3], v[0:1], 16 +; GCN-NEXT: v_readfirstlane_b32 s0, v4 +; GCN-NEXT: v_readfirstlane_b32 s1, v2 +; GCN-NEXT: v_readfirstlane_b32 s2, v1 ; GCN-NEXT: ; return to shader part epilog ; ; GFX7-LABEL: s_copysign_v3bf16: ; GFX7: ; %bb.0: -; GFX7-NEXT: v_mul_f32_e64 v1, 1.0, s4 -; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s5 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_mul_f32_e64 v2, 1.0, s3 -; GFX7-NEXT: v_mul_f32_e64 v4, 1.0, s1 +; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s3 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s4 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_mul_f32_e64 v3, 1.0, s2 -; GFX7-NEXT: v_mul_f32_e64 v5, 1.0, s0 -; GFX7-NEXT: v_and_b32_e32 v1, 0x8000, v1 +; GFX7-NEXT: v_mul_f32_e64 v1, 1.0, s5 +; GFX7-NEXT: v_mul_f32_e64 v4, 1.0, s1 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_mul_f32_e64 v3, 1.0, s0 +; GFX7-NEXT: v_mul_f32_e64 v5, 1.0, s2 +; GFX7-NEXT: v_and_b32_e32 v0, 0x8000, v0 ; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 
15 -; GFX7-NEXT: v_and_b32_e32 v2, 0x8000, v2 +; GFX7-NEXT: v_and_b32_e32 v1, 0x8000, v1 ; GFX7-NEXT: v_bfe_u32 v5, v5, 16, 15 -; GFX7-NEXT: v_or_b32_e32 v1, v4, v1 -; GFX7-NEXT: v_and_b32_e32 v0, 0x8000, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0x8000, v2 ; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 15 -; GFX7-NEXT: v_or_b32_e32 v2, v5, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v3, v0 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v1 -; GFX7-NEXT: v_alignbit_b32 v1, v0, v1, 16 -; GFX7-NEXT: v_readfirstlane_b32 s1, v1 -; GFX7-NEXT: v_readfirstlane_b32 s0, v2 -; GFX7-NEXT: v_readfirstlane_b32 s2, v0 +; GFX7-NEXT: v_or_b32_e32 v1, v5, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v3, v2 +; GFX7-NEXT: v_or_b32_e32 v4, v2, v0 +; GFX7-NEXT: v_lshr_b64 v[2:3], v[0:1], 16 +; GFX7-NEXT: v_readfirstlane_b32 s0, v4 +; GFX7-NEXT: v_readfirstlane_b32 s1, v2 +; GFX7-NEXT: v_readfirstlane_b32 s2, v1 ; GFX7-NEXT: ; return to shader part epilog ; ; GFX8-LABEL: s_copysign_v3bf16: @@ -3677,9 +3661,10 @@ define amdgpu_ps i16 @s_copysign_out_bf16_mag_bf16_sign_f32(bfloat inreg %mag, f ; ; GFX11TRUE16-LABEL: s_copysign_out_bf16_mag_bf16_sign_f32: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s1 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s1 +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11TRUE16-NEXT: ; return to shader part epilog ; @@ -3744,9 +3729,10 @@ define amdgpu_ps i16 @s_copysign_out_bf16_mag_bf16_sign_f64(bfloat inreg %mag, d ; ; GFX11TRUE16-LABEL: s_copysign_out_bf16_mag_bf16_sign_f64: ; GFX11TRUE16: ; %bb.0: -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e64 v1, 
16, s2 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s2 +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11TRUE16-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11TRUE16-NEXT: ; return to shader part epilog ; @@ -6700,15 +6686,16 @@ define <3 x bfloat> @v_copysign_out_v3bf16_mag_v3bf16_sign_v3f64(<3 x bfloat> %m ; GFX11TRUE16-LABEL: v_copysign_out_v3bf16_mag_v3bf16_sign_v3f64: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l -; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v5 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_bfi_b32 v2, 0x7fff, v2, v4 +; GFX11TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v7 +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v3 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0x7fff0000, v1, v3 -; GFX11TRUE16-NEXT: v_bfi_b32 v2, 0x7fff0000, v2, v7 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h -; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l +; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0x7fff, v1, v4 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_copysign_out_v3bf16_mag_v3bf16_sign_v3f64: diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll index 574c1042859aa..62847b15d3443 100644 --- a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll @@ -776,22 +776,13 @@ define half @v_copysign_out_f16_mag_f16_sign_f32(half %mag, 
float %sign) { ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-TRUE16-LABEL: v_copysign_out_f16_mag_f16_sign_f32: -; GFX11-TRUE16: ; %bb.0: -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v1 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h -; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-FAKE16-LABEL: v_copysign_out_f16_mag_f16_sign_f32: -; GFX11-FAKE16: ; %bb.0: -; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 -; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: v_copysign_out_f16_mag_f16_sign_f32: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] %sign.trunc = fptrunc float %sign to half %out = call half @llvm.copysign.f16(half %mag, half %sign.trunc) ret half %out @@ -823,22 +814,13 @@ define half @v_copysign_out_f16_mag_f16_sign_f64(half %mag, double %sign) { ; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; -; GFX11-TRUE16-LABEL: v_copysign_out_f16_mag_f16_sign_f64: -; GFX11-TRUE16: ; %bb.0: -; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, v2 -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h -; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-FAKE16-LABEL: v_copysign_out_f16_mag_f16_sign_f64: -; GFX11-FAKE16: ; %bb.0: -; 
GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v2 -; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11-FAKE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 -; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX11-LABEL: v_copysign_out_f16_mag_f16_sign_f64: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] %sign.trunc = fptrunc double %sign to half %out = call half @llvm.copysign.f16(half %mag, half %sign.trunc) ret half %out @@ -1450,29 +1432,29 @@ define amdgpu_ps <3 x i16> @s_copysign_v3f16(<3 x half> inreg %arg_mag, <3 x hal ; SI: ; %bb.0: ; SI-NEXT: v_cvt_f16_f32_e32 v2, s4 ; SI-NEXT: v_cvt_f16_f32_e32 v3, s1 -; SI-NEXT: v_cvt_f16_f32_e32 v0, s5 -; SI-NEXT: v_cvt_f16_f32_e32 v1, s2 -; SI-NEXT: v_cvt_f16_f32_e32 v4, s3 -; SI-NEXT: v_cvt_f16_f32_e32 v5, s0 +; SI-NEXT: v_cvt_f16_f32_e32 v4, s5 +; SI-NEXT: v_cvt_f16_f32_e32 v5, s2 +; SI-NEXT: v_cvt_f16_f32_e32 v0, s3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, s0 ; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 ; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 ; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 ; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 ; SI-NEXT: s_brev_b32 s0, -2 ; SI-NEXT: v_bfi_b32 v2, s0, v3, v2 ; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 ; SI-NEXT: v_bfi_b32 v3, s0, v5, v4 ; SI-NEXT: v_bfi_b32 v0, s0, v1, v0 -; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; SI-NEXT: v_or_b32_e32 v2, v3, v1 -; SI-NEXT: v_alignbit_b32 v1, v0, v1, 16 -; SI-NEXT: v_readfirstlane_b32 s1, v1 -; SI-NEXT: v_readfirstlane_b32 s0, v2 -; SI-NEXT: v_readfirstlane_b32 s2, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v3 +; SI-NEXT: 
v_cvt_f16_f32_e32 v4, v0 +; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2 +; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 16 +; SI-NEXT: v_or_b32_e32 v4, v4, v0 +; SI-NEXT: v_readfirstlane_b32 s0, v4 +; SI-NEXT: v_readfirstlane_b32 s1, v2 +; SI-NEXT: v_readfirstlane_b32 s2, v1 ; SI-NEXT: ; return to shader part epilog ; ; VI-LABEL: s_copysign_v3f16: @@ -2832,9 +2814,10 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f16_sign_f32(half inreg %mag, float ; ; GFX11-TRUE16-LABEL: s_copysign_out_f16_mag_f16_sign_f32: ; GFX11-TRUE16: ; %bb.0: -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s1 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s1 +; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-TRUE16-NEXT: ; return to shader part epilog ; @@ -2883,9 +2866,10 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f16_sign_f64(half inreg %mag, doubl ; ; GFX11-TRUE16-LABEL: s_copysign_out_f16_mag_f16_sign_f64: ; GFX11-TRUE16: ; %bb.0: -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, s0 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, s0 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e64 v1, 16, s2 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, s2 +; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1 ; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s0, v0 ; GFX11-TRUE16-NEXT: ; return to shader part epilog ; @@ -5590,15 +5574,16 @@ define <3 x half> @v_copysign_out_v3f16_mag_v3f16_sign_v3f64(<3 x half> %mag, <3 ; GFX11-TRUE16-LABEL: v_copysign_out_v3f16_mag_v3f16_sign_v3f64: ; GFX11-TRUE16: ; %bb.0: ; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l -; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff0000, v0, 
v5 +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v0.h +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v5 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0x7fff, v2, v4 +; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, 16, v7 +; GFX11-TRUE16-NEXT: v_bfi_b32 v0, 0x7fff, v0, v3 ; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-TRUE16-NEXT: v_bfi_b32 v1, 0x7fff0000, v1, v3 -; GFX11-TRUE16-NEXT: v_bfi_b32 v2, 0x7fff0000, v2, v7 -; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.h -; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h +; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v2.l +; GFX11-TRUE16-NEXT: v_bfi_b32 v1, 0x7fff, v1, v4 ; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11-FAKE16-LABEL: v_copysign_out_v3f16_mag_v3f16_sign_v3f64: diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir index 73cdcddbef135..a3b2191695734 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir @@ -209,8 +209,8 @@ body: | bb.0: ; GCN-LABEL: name: s_mov_b32_imm_65_copy_to_av_32 ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65, implicit $exec - ; GCN-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec - ; GCN-NEXT: S_ENDPGM 0, implicit [[AV_MOV_]] + ; GCN-NEXT: [[COPY:%[0-9]+]]:av_32 = COPY [[S_MOV_B32_]] + ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]] %0:sreg_32 = S_MOV_B32 65, implicit $exec %1:av_32 = COPY %0 S_ENDPGM 0, implicit %1 diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir index dfcf9a1f5c5ae..bec188e4e8378 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir @@ -240,8 +240,8 
@@ body: | bb.0: ; GCN-LABEL: name: s_mov_b32_imm_literal_copy_s_to_av_32 ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 999 - ; GCN-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec - ; GCN-NEXT: $agpr0 = COPY [[AV_MOV_]] + ; GCN-NEXT: [[COPY:%[0-9]+]]:av_32 = COPY [[S_MOV_B32_]] + ; GCN-NEXT: $agpr0 = COPY [[COPY]] ; GCN-NEXT: S_ENDPGM 0 %0:sreg_32 = S_MOV_B32 999 %1:av_32 = COPY %0 @@ -257,8 +257,8 @@ body: | bb.0: ; GCN-LABEL: name: v_mov_b32_imm_literal_copy_v_to_av_32 ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 999, implicit $exec - ; GCN-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[V_MOV_B32_e32_]], implicit $exec - ; GCN-NEXT: $agpr0 = COPY [[AV_MOV_]] + ; GCN-NEXT: [[COPY:%[0-9]+]]:av_32 = COPY [[V_MOV_B32_e32_]] + ; GCN-NEXT: $agpr0 = COPY [[COPY]] ; GCN-NEXT: S_ENDPGM 0 %0:vgpr_32 = V_MOV_B32_e32 999, implicit $exec %1:av_32 = COPY %0 diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index-agpr.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index-agpr.mir new file mode 100644 index 0000000000000..32a209608a4d0 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index-agpr.mir @@ -0,0 +1,131 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -run-pass=si-fold-operands %s -o - | FileCheck %s + +--- +name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_fi_to_av +tracksRegLiveness: true +frameInfo: + maxAlignment: 4 + localFrameSize: 16384 +stack: + - { id: 0, size: 16384, alignment: 4, local-offset: 0 } +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_fi_to_av + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 + ; CHECK-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec + ; CHECK-NEXT: SI_RETURN implicit [[AV_MOV_]] + %0:sreg_32 = S_MOV_B32 %stack.0 + 
%1:av_32 = AV_MOV_B32_IMM_PSEUDO %0, implicit $exec + SI_RETURN implicit %1 + +... + +--- +name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_fi_to_v +tracksRegLiveness: true +frameInfo: + maxAlignment: 4 + localFrameSize: 16384 +stack: + - { id: 0, size: 16384, alignment: 4, local-offset: 0 } +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_fi_to_v + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 + ; CHECK-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[AV_MOV_]], implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[COPY]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 + %0:sreg_32 = S_MOV_B32 %stack.0 + %1:av_32 = AV_MOV_B32_IMM_PSEUDO %0, implicit $exec + %2:vgpr_32 = COPY %1, implicit $exec + $vgpr0 = COPY %2 + SI_RETURN implicit $vgpr0 + +... + +--- +name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_lit_to_v +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_lit_to_v + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1234 + ; CHECK-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[AV_MOV_]], implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[COPY]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 + %0:sreg_32 = S_MOV_B32 1234 + %1:av_32 = AV_MOV_B32_IMM_PSEUDO %0, implicit $exec + %2:vgpr_32 = COPY %1, implicit $exec + $vgpr0 = COPY %2 + SI_RETURN implicit $vgpr0 + +... 
+ +--- +name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_imm_to_v +tracksRegLiveness: true +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_mov_b32_imm_pseudo_from_s_mov_b32_imm_to_v + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 8, implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[V_MOV_B32_e32_]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 + %0:sreg_32 = S_MOV_B32 8 + %1:av_32 = AV_MOV_B32_IMM_PSEUDO %0, implicit $exec + %2:vgpr_32 = COPY %1, implicit $exec + $vgpr0 = COPY %2 + SI_RETURN implicit $vgpr0 + +... + +--- +name: fold_frame_index_av_regression_0 +tracksRegLiveness: true +frameInfo: + maxAlignment: 4 + localFrameSize: 16384 +stack: + - { id: 0, size: 16384, alignment: 4, local-offset: 0 } +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_regression_0 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]], implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[COPY]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 + %0:sreg_32 = S_MOV_B32 %stack.0 + %1:av_32 = COPY %0 + %2:vgpr_32 = COPY %1, implicit $exec + $vgpr0 = COPY %2 + SI_RETURN implicit $vgpr0 + +... + +--- +name: fold_frame_index_av_regression_1 +tracksRegLiveness: true +frameInfo: + maxAlignment: 4 + localFrameSize: 16384 +stack: + - { id: 0, size: 16384, alignment: 4, local-offset: 0 } +body: | + bb.0: + ; CHECK-LABEL: name: fold_frame_index_av_regression_1 + ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0 + ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]], implicit $exec + ; CHECK-NEXT: $vgpr0 = COPY [[COPY]] + ; CHECK-NEXT: SI_RETURN implicit $vgpr0 + %0:sreg_32 = S_MOV_B32 %stack.0 + %1:sreg_32 = S_MOV_B32 killed %0 + %2:sreg_64 = S_MOV_B64 0 + %3:av_32 = COPY %1 + %4:vgpr_32 = COPY %3, implicit $exec + $vgpr0 = COPY %4 + SI_RETURN implicit $vgpr0 + +... 
+ diff --git a/llvm/test/CodeGen/AMDGPU/function-returns.ll b/llvm/test/CodeGen/AMDGPU/function-returns.ll index 38003f6075c35..0084d936ec03b 100644 --- a/llvm/test/CodeGen/AMDGPU/function-returns.ll +++ b/llvm/test/CodeGen/AMDGPU/function-returns.ll @@ -895,11 +895,11 @@ define <3 x i16> @v3i16_func_void() #0 { ; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; CI-NEXT: s_mov_b32 s7, 0xf000 ; CI-NEXT: s_mov_b32 s6, -1 -; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0 +; CI-NEXT: buffer_load_dwordx2 v[3:4], off, s[4:7], 0 ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: v_alignbit_b32 v1, v3, v2, 16 -; CI-NEXT: v_mov_b32_e32 v0, v2 -; CI-NEXT: v_mov_b32_e32 v2, v3 +; CI-NEXT: v_lshr_b64 v[1:2], v[3:4], 16 +; CI-NEXT: v_mov_b32_e32 v0, v3 +; CI-NEXT: v_mov_b32_e32 v2, v4 ; CI-NEXT: s_setpc_b64 s[30:31] ; ; GFX89-LABEL: v3i16_func_void: @@ -1008,7 +1008,7 @@ define <5 x i16> @v5i16_func_void() #0 { ; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 ; CI-NEXT: buffer_load_sshort v4, off, s[4:7], 0 offset:8 ; CI-NEXT: s_waitcnt vmcnt(1) -; CI-NEXT: v_alignbit_b32 v5, v1, v0, 16 +; CI-NEXT: v_lshr_b64 v[5:6], v[0:1], 16 ; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v1 ; CI-NEXT: v_mov_b32_e32 v2, v1 ; CI-NEXT: v_mov_b32_e32 v1, v5 diff --git a/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll b/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll index eb5c5ef15ed56..6b094247e113c 100644 --- a/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll +++ b/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll @@ -111,22 +111,20 @@ define amdgpu_kernel void @f2(i32 %arg, i32 %arg1, i32 %arg2, i1 %arg3, i32 %arg ; GFX11-NEXT: s_cbranch_scc0 .LBB2_8 ; GFX11-NEXT: ; %bb.5: ; %bb18.preheader ; GFX11-NEXT: s_load_b128 s[28:31], s[16:17], 0x44 +; GFX11-NEXT: v_mov_b32_e32 v0, 0 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_mul_hi_u32 s0, s29, s28 -; GFX11-NEXT: s_mul_i32 s1, s29, s28 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11-NEXT: 
v_alignbit_b32 v0, s0, s1, 1 +; GFX11-NEXT: s_mul_hi_u32 s1, s29, s28 +; GFX11-NEXT: s_mul_i32 s0, s29, s28 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_lshr_b64 s[0:1], s[0:1], 1 ; GFX11-NEXT: s_mov_b32 s1, 0 -; GFX11-NEXT: v_readfirstlane_b32 s0, v0 -; GFX11-NEXT: v_mov_b32_e32 v0, 0 ; GFX11-NEXT: s_or_b32 s0, s0, 1 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: s_lshr_b32 s0, s0, s30 -; GFX11-NEXT: s_mul_i32 s0, s0, s22 ; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_mul_i32 s0, s0, s22 ; GFX11-NEXT: s_mul_i32 s0, s0, s20 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX11-NEXT: s_or_b32 s0, s19, s0 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) ; GFX11-NEXT: s_lshl_b64 s[20:21], s[0:1], 1 ; GFX11-NEXT: s_mov_b32 s0, s1 ; GFX11-NEXT: global_load_u16 v1, v0, s[20:21] diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll index 103110af108de..fa6d878ad7556 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.make.buffer.rsrc.ll @@ -186,7 +186,7 @@ define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr inreg %p, i64 inreg %nu ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 ; CHECK45-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 25 - ; CHECK45-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], killed [[S_MOV_B32_]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 killed [[COPY5]], killed [[S_MOV_B32_]], implicit-def dead $scc ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0 ; CHECK45-NEXT: 
[[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[S_MOV_B32_1]], %subreg.sub0, killed [[S_LSHL_B32_]], %subreg.sub1 ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], killed [[REG_SEQUENCE2]], implicit-def dead $scc @@ -197,29 +197,28 @@ define amdgpu_ps ptr addrspace(8) @variable_top_half(ptr inreg %p, i64 inreg %nu ; CHECK45-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF ; CHECK45-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[DEF]], %subreg.sub0, killed [[S_LSHL_B32_1]], %subreg.sub1 ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 7 - ; CHECK45-NEXT: [[S_LSHR_B64_:%[0-9]+]]:sreg_64 = S_LSHR_B64 [[REG_SEQUENCE1]], [[S_MOV_B32_3]], implicit-def dead $scc - ; CHECK45-NEXT: [[S_OR_B64_1:%[0-9]+]]:sreg_64 = S_OR_B64 killed [[S_LSHR_B64_]], killed [[REG_SEQUENCE3]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_LSHR_B64_:%[0-9]+]]:sreg_64 = S_LSHR_B64 [[REG_SEQUENCE1]], killed [[S_MOV_B32_3]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_OR_B64_1:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_LSHR_B64_]], killed [[REG_SEQUENCE3]], implicit-def dead $scc ; CHECK45-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 16384 ; CHECK45-NEXT: [[DEF2:%[0-9]+]]:sreg_32 = IMPLICIT_DEF ; CHECK45-NEXT: [[DEF3:%[0-9]+]]:sreg_32 = IMPLICIT_DEF ; CHECK45-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[DEF2]], %subreg.sub0, killed [[S_MOV_B32_4]], %subreg.sub1 ; CHECK45-NEXT: [[S_OR_B64_2:%[0-9]+]]:sreg_64 = S_OR_B64 killed [[S_OR_B64_1]], killed [[REG_SEQUENCE4]], implicit-def dead $scc ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub1 - ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub1 - ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_3]] - ; CHECK45-NEXT: [[V_ALIGNBIT_B32_fake16_e64_:%[0-9]+]]:vgpr_32 = V_ALIGNBIT_B32_fake16_e64 0, killed [[COPY8]], 0, [[COPY5]], 0, [[COPY9]], 0, 0, implicit $exec - ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 
killed [[V_ALIGNBIT_B32_fake16_e64_]], implicit $exec - ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] - ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY10]], implicit $exec - ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY8]], implicit $exec + ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY9]], implicit $exec + ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]] ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY11]], implicit $exec - ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:sreg_32 = COPY [[S_LSHR_B64_]].sub0 ; CHECK45-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY12]] ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY13]], implicit $exec - ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_3]] - ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_2]] - ; CHECK45-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_]] - ; CHECK45-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; CHECK45-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_3]] + ; CHECK45-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_]] ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 4, i64 %numVals, i32 %flags) ret ptr addrspace(8) %rsrc @@ -270,13 +269,13 @@ define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg %p, i16 inreg 
%stride, ; CHECK45-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY4]], %subreg.sub1 ; CHECK45-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1 ; CHECK45-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 7 - ; CHECK45-NEXT: [[S_LSHR_B64_:%[0-9]+]]:sreg_64 = S_LSHR_B64 [[REG_SEQUENCE1]], [[S_MOV_B32_]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_LSHR_B64_:%[0-9]+]]:sreg_64 = S_LSHR_B64 [[REG_SEQUENCE1]], killed [[S_MOV_B32_]], implicit-def dead $scc ; CHECK45-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[COPY3]] ; CHECK45-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 12 ; CHECK45-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 killed [[COPY6]], killed [[S_MOV_B32_1]], implicit-def dead $scc ; CHECK45-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0 ; CHECK45-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, killed [[S_LSHL_B32_]], %subreg.sub1 - ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 killed [[S_LSHR_B64_]], killed [[REG_SEQUENCE2]], implicit-def dead $scc + ; CHECK45-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[S_LSHR_B64_]], killed [[REG_SEQUENCE2]], implicit-def dead $scc ; CHECK45-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 28 ; CHECK45-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY]], killed [[S_MOV_B32_3]], implicit-def dead $scc ; CHECK45-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, killed [[S_LSHL_B32_1]], %subreg.sub1 @@ -284,25 +283,24 @@ define amdgpu_ps ptr addrspace(8) @general_case(ptr inreg %p, i16 inreg %stride, ; CHECK45-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_1]].sub1 ; CHECK45-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub0 ; CHECK45-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 25 - ; CHECK45-NEXT: [[S_LSHL_B32_2:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY8]], killed [[S_MOV_B32_4]], implicit-def dead $scc + ; 
CHECK45-NEXT: [[S_LSHL_B32_2:%[0-9]+]]:sreg_32 = S_LSHL_B32 killed [[COPY8]], killed [[S_MOV_B32_4]], implicit-def dead $scc ; CHECK45-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, killed [[S_LSHL_B32_2]], %subreg.sub1 ; CHECK45-NEXT: [[S_OR_B64_2:%[0-9]+]]:sreg_64 = S_OR_B64 [[REG_SEQUENCE]], killed [[REG_SEQUENCE4]], implicit-def dead $scc ; CHECK45-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_2]].sub1 - ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE1]].sub1 - ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; CHECK45-NEXT: [[V_ALIGNBIT_B32_fake16_e64_:%[0-9]+]]:vgpr_32 = V_ALIGNBIT_B32_fake16_e64 0, killed [[COPY10]], 0, [[COPY8]], 0, [[COPY11]], 0, 0, implicit $exec - ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[V_ALIGNBIT_B32_fake16_e64_]], implicit $exec - ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] - ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY12]], implicit $exec - ; CHECK45-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; CHECK45-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY10]], implicit $exec + ; CHECK45-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY11]], implicit $exec + ; CHECK45-NEXT: [[COPY12:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY12]] ; CHECK45-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY13]], implicit $exec - ; CHECK45-NEXT: [[COPY14:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0 + ; CHECK45-NEXT: [[COPY14:%[0-9]+]]:sreg_32 = COPY [[S_LSHR_B64_]].sub0 ; CHECK45-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY14]] ; CHECK45-NEXT: 
[[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY15]], implicit $exec - ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_3]] - ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] - ; CHECK45-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_]] - ; CHECK45-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK45-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_2]] + ; CHECK45-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_]] + ; CHECK45-NEXT: $sgpr2 = COPY [[V_READFIRSTLANE_B32_3]] + ; CHECK45-NEXT: $sgpr3 = COPY [[V_READFIRSTLANE_B32_1]] ; CHECK45-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1, $sgpr2, $sgpr3 %rsrc = call ptr addrspace(8) @llvm.amdgcn.make.buffer.rsrc.p8.p0(ptr %p, i16 %stride, i64 %numVals, i32 %flags) ret ptr addrspace(8) %rsrc diff --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll index bf8994e005fc5..3d9c2a29cb9c1 100644 --- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll +++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll @@ -815,9 +815,10 @@ define amdgpu_kernel void @test_umul_i24(ptr addrspace(1) %out, i32 %arg) { ; SI-NEXT: s_mov_b32 s3, 0xf000 ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: s_lshr_b32 s2, s2, 9 -; SI-NEXT: v_mul_hi_u32 v0, s2, v0 -; SI-NEXT: s_mul_i32 s2, s2, 0xff803fe1 -; SI-NEXT: v_alignbit_b32 v0, v0, s2, 1 +; SI-NEXT: s_mul_i32 s4, s2, 0xff803fe1 +; SI-NEXT: v_mul_hi_u32 v1, s2, v0 +; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 1 ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; SI-NEXT: s_endpgm @@ -832,7 +833,7 @@ define amdgpu_kernel void @test_umul_i24(ptr addrspace(1) %out, i32 %arg) { ; VI-NEXT: s_lshr_b32 s0, s0, 9 ; VI-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s0, v0, 0 ; VI-NEXT: s_mov_b64 s[0:1], 0 -; VI-NEXT: v_alignbit_b32 v0, v1, v0, 1 +; VI-NEXT: v_lshrrev_b64 v[0:1], 1, v[0:1] ; VI-NEXT: s_nop 2 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm @@ -844,11 +845,11 @@ define amdgpu_kernel void 
@test_umul_i24(ptr addrspace(1) %out, i32 %arg) { ; GFX9-NEXT: s_mov_b32 s3, 0xf000 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: s_lshr_b32 s2, s2, 9 -; GFX9-NEXT: s_mul_hi_u32 s4, s2, 0xff803fe1 -; GFX9-NEXT: s_mul_i32 s2, s2, 0xff803fe1 -; GFX9-NEXT: v_mov_b32_e32 v0, s2 -; GFX9-NEXT: v_alignbit_b32 v0, s4, v0, 1 +; GFX9-NEXT: s_mul_hi_u32 s5, s2, 0xff803fe1 +; GFX9-NEXT: s_mul_i32 s4, s2, 0xff803fe1 +; GFX9-NEXT: s_lshr_b64 s[4:5], s[4:5], 1 ; GFX9-NEXT: s_mov_b32 s2, -1 +; GFX9-NEXT: v_mov_b32_e32 v0, s4 ; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX9-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll index ddec6af0af69e..697bcc3b8fb47 100644 --- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll +++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll @@ -1073,27 +1073,26 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 % ; GCN-NEXT: s_mov_b32 s7, 0xf000 ; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: s_sext_i32_i16 s1, s9 -; GCN-NEXT: v_mov_b32_e32 v0, s8 -; GCN-NEXT: v_alignbit_b32 v0, s1, v0, 24 -; GCN-NEXT: v_cvt_f32_i32_e32 v1, v0 ; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: s_sext_i32_i16 s0, s3 -; GCN-NEXT: v_mov_b32_e32 v2, s2 -; GCN-NEXT: v_alignbit_b32 v2, s0, v2, 24 -; GCN-NEXT: v_cvt_f32_i32_e32 v3, v2 -; GCN-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GCN-NEXT: v_xor_b32_e32 v0, v2, v0 -; GCN-NEXT: v_ashrrev_i32_e32 v0, 30, v0 -; GCN-NEXT: v_or_b32_e32 v0, 1, v0 -; GCN-NEXT: v_mul_f32_e32 v2, v3, v4 +; GCN-NEXT: s_sext_i32_i16 s9, s9 +; GCN-NEXT: s_mov_b32 s5, s1 +; GCN-NEXT: s_lshr_b64 s[0:1], s[8:9], 24 +; GCN-NEXT: v_cvt_f32_i32_e32 v0, s0 +; GCN-NEXT: s_sext_i32_i16 s3, s3 +; GCN-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GCN-NEXT: s_xor_b32 s0, s2, s0 +; GCN-NEXT: s_ashr_i32 s0, s0, 30 +; GCN-NEXT: s_or_b32 s2, s0, 1 +; GCN-NEXT: v_mul_f32_e32 v2, v1, v2 ; 
GCN-NEXT: v_trunc_f32_e32 v2, v2 -; GCN-NEXT: v_mad_f32 v3, -v2, v1, v3 +; GCN-NEXT: v_mad_f32 v1, -v2, v0, v1 ; GCN-NEXT: v_cvt_i32_f32_e32 v2, v2 -; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1| -; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v2 +; GCN-NEXT: v_cmp_ge_f32_e64 s[0:1], |v1|, |v0| +; GCN-NEXT: s_and_b64 s[0:1], s[0:1], exec +; GCN-NEXT: s_cselect_b32 s0, s2, 0 +; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v2 ; GCN-NEXT: v_bfe_i32 v0, v0, 0, 24 ; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 @@ -1107,27 +1106,26 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 % ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_mov_b32 s5, s1 -; GCN-IR-NEXT: s_sext_i32_i16 s1, s9 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s8 -; GCN-IR-NEXT: v_alignbit_b32 v0, s1, v0, 24 -; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, v0 ; GCN-IR-NEXT: s_mov_b32 s4, s0 -; GCN-IR-NEXT: s_sext_i32_i16 s0, s3 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s2 -; GCN-IR-NEXT: v_alignbit_b32 v2, s0, v2, 24 -; GCN-IR-NEXT: v_cvt_f32_i32_e32 v3, v2 -; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v0 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v0, 30, v0 -; GCN-IR-NEXT: v_or_b32_e32 v0, 1, v0 -; GCN-IR-NEXT: v_mul_f32_e32 v2, v3, v4 +; GCN-IR-NEXT: s_sext_i32_i16 s9, s9 +; GCN-IR-NEXT: s_mov_b32 s5, s1 +; GCN-IR-NEXT: s_lshr_b64 s[0:1], s[8:9], 24 +; GCN-IR-NEXT: v_cvt_f32_i32_e32 v0, s0 +; GCN-IR-NEXT: s_sext_i32_i16 s3, s3 +; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GCN-IR-NEXT: s_xor_b32 s0, s2, s0 +; GCN-IR-NEXT: s_ashr_i32 s0, s0, 30 +; GCN-IR-NEXT: s_or_b32 s2, s0, 1 +; GCN-IR-NEXT: v_mul_f32_e32 v2, v1, v2 ; GCN-IR-NEXT: v_trunc_f32_e32 v2, v2 -; GCN-IR-NEXT: v_mad_f32 v3, -v2, v1, v3 +; GCN-IR-NEXT: v_mad_f32 v1, -v2, v0, v1 ; 
GCN-IR-NEXT: v_cvt_i32_f32_e32 v2, v2 -; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1| -; GCN-IR-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc -; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, v0, v2 +; GCN-IR-NEXT: v_cmp_ge_f32_e64 s[0:1], |v1|, |v0| +; GCN-IR-NEXT: s_and_b64 s[0:1], s[0:1], exec +; GCN-IR-NEXT: s_cselect_b32 s0, s2, 0 +; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s0, v2 ; GCN-IR-NEXT: v_bfe_i32 v0, v0, 0, 24 ; GCN-IR-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0 diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll index 92d3277d5d3e3..bb22144b815a1 100644 --- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll +++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll @@ -4148,28 +4148,28 @@ define <2 x half> @mul_select_negk_negfabs_v2f16(<2 x i32> %c, <2 x half> %x, <2 ; -------------------------------------------------------------------------------- define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %y) { -; CI-SAFE-LABEL: select_fneg_posk_src_add_v2f16: -; CI-SAFE: ; %bb.0: -; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_add_f32_e32 v3, 4.0, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v2, 4.0, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3 -; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2 -; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-SAFE-NEXT: 
v_cndmask_b32_e32 v1, 2.0, v2, vcc -; CI-SAFE-NEXT: s_setpc_b64 s[30:31] +; CI-LABEL: select_fneg_posk_src_add_v2f16: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_add_f32_e32 v3, 4.0, v3 +; CI-NEXT: v_add_f32_e32 v2, 4.0, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; CI-NEXT: v_or_b32_e32 v2, v2, v3 +; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v2 +; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc +; CI-NEXT: s_setpc_b64 s[30:31] ; ; VI-SAFE-LABEL: select_fneg_posk_src_add_v2f16: ; VI-SAFE: ; %bb.0: @@ -4229,21 +4229,6 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, < ; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] ; -; CI-NSZ-LABEL: select_fneg_posk_src_add_v2f16: -; CI-NSZ: ; %bb.0: -; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-NSZ-NEXT: v_sub_f32_e32 v2, -4.0, v2 -; CI-NSZ-NEXT: v_sub_f32_e32 v3, -4.0, v3 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc -; CI-NSZ-NEXT: s_setpc_b64 s[30:31] -; ; VI-NSZ-LABEL: select_fneg_posk_src_add_v2f16: ; VI-NSZ: ; %bb.0: ; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -4302,6 +4287,105 @@ define <2 x half> 
@select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, < ret <2 x half> %select } +define <2 x half> @select_fneg_posk_src_add_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %y) { +; CI-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_sub_f32_e32 v2, -4.0, v2 +; CI-NEXT: v_sub_f32_e32 v3, -4.0, v3 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc +; CI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; VI-NEXT: v_mov_b32_e32 v1, 0xc400 +; VI-NEXT: v_sub_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; VI-NEXT: v_sub_f16_e32 v2, -4.0, v2 +; VI-NEXT: v_mov_b32_e32 v3, 0x4000 +; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5] +; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_pk_add_f16 v1, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5] +; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 
+; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-SAFE-TRUE16: ; %bb.0: +; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-SAFE-FAKE16: ; %bb.0: +; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-NSZ-TRUE16: ; %bb.0: +; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | 
instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz: +; GFX11-NSZ-FAKE16: ; %bb.0: +; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0] +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = icmp eq <2 x i32> %c, zeroinitializer + %add = fadd nsz <2 x half> %x, + %fneg = fneg <2 x half> %add + %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> + ret <2 x half> %select +} + define <2 x half> @select_fneg_posk_src_sub_v2f16(<2 x i32> %c, <2 x half> %x) { ; CI-SAFE-LABEL: select_fneg_posk_src_sub_v2f16: ; CI-SAFE: ; %bb.0: @@ -4704,34 +4788,34 @@ define <2 x half> @select_fneg_posk_src_fma_v2f16(<2 x i32> %c, <2 x half> %x, < } define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %z) { -; CI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16: -; CI-SAFE: ; %bb.0: -; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; CI-SAFE-NEXT: 
v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; CI-SAFE-NEXT: v_mul_f32_e32 v3, 4.0, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v3, v3, v5 -; CI-SAFE-NEXT: v_mul_f32_e32 v2, 4.0, v2 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-SAFE-NEXT: v_add_f32_e32 v2, v2, v4 -; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3 -; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2 -; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2 -; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc -; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc -; CI-SAFE-NEXT: s_setpc_b64 s[30:31] +; CI-LABEL: select_fneg_posk_src_fmad_v2f16: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; CI-NEXT: v_mul_f32_e32 v3, 4.0, v3 +; CI-NEXT: v_add_f32_e32 v3, v3, v5 +; CI-NEXT: v_mul_f32_e32 v2, 4.0, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_add_f32_e32 v2, v2, v4 +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; CI-NEXT: v_or_b32_e32 v2, v2, v3 +; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v2 +; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc +; CI-NEXT: s_setpc_b64 s[30:31] ; ; VI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16: ; VI-SAFE: ; %bb.0: @@ -4793,27 +4877,6 @@ define 
<2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, ; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 ; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] ; -; CI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16: -; CI-NSZ: ; %bb.0: -; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v4, v4 -; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v5, v5 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v4, v4 -; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v5, v5 -; CI-NSZ-NEXT: v_mul_f32_e32 v2, -4.0, v2 -; CI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v3 -; CI-NSZ-NEXT: v_sub_f32_e32 v2, v2, v4 -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 -; CI-NSZ-NEXT: v_sub_f32_e32 v3, v3, v5 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc -; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 -; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc -; CI-NSZ-NEXT: s_setpc_b64 s[30:31] -; ; VI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16: ; VI-NSZ: ; %bb.0: ; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -4873,6 +4936,112 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, ret <2 x half> %select } +define <2 x half> @select_fneg_posk_src_fmad_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %z) { +; CI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; CI: ; %bb.0: +; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; CI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; CI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; CI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; CI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; CI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; CI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; CI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; CI-NEXT: v_mul_f32_e32 v2, -4.0, v2 +; CI-NEXT: v_mul_f32_e32 v3, -4.0, v3 +; CI-NEXT: v_sub_f32_e32 v2, v2, v4 +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0 +; CI-NEXT: v_sub_f32_e32 v3, v3, v5 +; 
CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc +; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc +; CI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_fma_f16 v1, v4, -4.0, -v1 +; VI-NEXT: v_fma_f16 v2, v2, -4.0, -v3 +; VI-NEXT: v_mov_b32_e32 v3, 0x4000 +; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5] +; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; GFX9-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1 +; GFX9-NEXT: v_pk_fma_f16 v1, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5] +; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1 +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-SAFE-TRUE16: ; %bb.0: +; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-SAFE-TRUE16-NEXT: 
v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-SAFE-FAKE16: ; %bb.0: +; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-SAFE-FAKE16-NEXT: v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-NSZ-TRUE16: ; %bb.0: +; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1 +; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo +; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0 +; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz: +; GFX11-NSZ-FAKE16: ; %bb.0: +; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NSZ-FAKE16-NEXT: v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1] +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0 +; GFX11-NSZ-FAKE16-NEXT: 
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1 +; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo +; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100 +; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = icmp eq <2 x i32> %c, zeroinitializer + %fmad = call nsz <2 x half> @llvm.fmuladd.v2f16(<2 x half> %x, <2 x half> , <2 x half> %z) + %fneg = fneg <2 x half> %fmad + %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> + ret <2 x half> %select +} + declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #0 declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>) #0 declare <2 x half> @llvm.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>) #0 diff --git a/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll b/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll index 59a884c829312..760a126afa995 100644 --- a/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll +++ b/llvm/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll @@ -281,9 +281,9 @@ define amdgpu_kernel void @v_uextract_bit_31_32_i64(ptr addrspace(1) %out, ptr a ; GCN-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[8:11], 0 addr64 ; GCN-NEXT: s_mov_b64 s[6:7], s[10:11] ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: v_alignbit_b32 v2, v3, v2, 31 -; GCN-NEXT: v_and_b32_e32 v2, 3, v2 +; GCN-NEXT: v_lshr_b64 v[2:3], v[2:3], 31 ; GCN-NEXT: v_mov_b32_e32 v3, 0 +; GCN-NEXT: v_and_b32_e32 v2, 3, v2 ; GCN-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 ; GCN-NEXT: s_endpgm %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x() diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll index 2d95875cad882..465024a699d43 100644 --- a/llvm/test/CodeGen/AMDGPU/srem64.ll +++ b/llvm/test/CodeGen/AMDGPU/srem64.ll @@ -1232,34 
+1232,33 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 % define amdgpu_kernel void @s_test_srem24_48(ptr addrspace(1) %out, i48 %x, i48 %y) { ; GCN-LABEL: s_test_srem24_48: ; GCN: ; %bb.0: +; GCN-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd ; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd ; GCN-NEXT: s_mov_b32 s7, 0xf000 ; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_sext_i32_i16 s9, s9 +; GCN-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; GCN-NEXT: v_cvt_f32_i32_e32 v0, s4 ; GCN-NEXT: s_sext_i32_i16 s3, s3 -; GCN-NEXT: s_sext_i32_i16 s5, s5 -; GCN-NEXT: v_mov_b32_e32 v0, s4 -; GCN-NEXT: v_alignbit_b32 v0, s5, v0, 24 -; GCN-NEXT: v_cvt_f32_i32_e32 v1, v0 -; GCN-NEXT: v_mov_b32_e32 v2, s2 -; GCN-NEXT: v_alignbit_b32 v2, s3, v2, 24 -; GCN-NEXT: v_cvt_f32_i32_e32 v3, v2 -; GCN-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GCN-NEXT: v_xor_b32_e32 v5, v2, v0 -; GCN-NEXT: v_ashrrev_i32_e32 v5, 30, v5 -; GCN-NEXT: v_or_b32_e32 v5, 1, v5 -; GCN-NEXT: v_mul_f32_e32 v4, v3, v4 -; GCN-NEXT: v_trunc_f32_e32 v4, v4 -; GCN-NEXT: v_mad_f32 v3, -v4, v1, v3 -; GCN-NEXT: v_cvt_i32_f32_e32 v4, v4 -; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1| -; GCN-NEXT: v_cndmask_b32_e32 v1, 0, v5, vcc +; GCN-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GCN-NEXT: s_xor_b32 s3, s2, s4 +; GCN-NEXT: s_ashr_i32 s3, s3, 30 +; GCN-NEXT: s_or_b32 s3, s3, 1 +; GCN-NEXT: v_mul_f32_e32 v2, v1, v2 +; GCN-NEXT: v_trunc_f32_e32 v2, v2 +; GCN-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GCN-NEXT: v_cvt_i32_f32_e32 v2, v2 +; GCN-NEXT: v_cmp_ge_f32_e64 s[8:9], |v1|, |v0| +; GCN-NEXT: s_and_b64 s[8:9], s[8:9], exec +; GCN-NEXT: s_cselect_b32 s3, s3, 0 +; GCN-NEXT: v_add_i32_e32 v0, vcc, s3, v2 +; GCN-NEXT: v_mul_lo_u32 v0, v0, s4 ; GCN-NEXT: s_mov_b32 s4, s0 -; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v4 -; GCN-NEXT: v_mul_lo_u32 v0, v1, v0 ; GCN-NEXT: s_mov_b32 s5, s1 
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, v0, v2 +; GCN-NEXT: v_sub_i32_e32 v0, vcc, s2, v0 ; GCN-NEXT: v_bfe_i32 v0, v0, 0, 24 ; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 @@ -1268,34 +1267,33 @@ define amdgpu_kernel void @s_test_srem24_48(ptr addrspace(1) %out, i48 %x, i48 % ; ; GCN-IR-LABEL: s_test_srem24_48: ; GCN-IR: ; %bb.0: +; GCN-IR-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd ; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 -; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd ; GCN-IR-NEXT: s_mov_b32 s7, 0xf000 ; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) +; GCN-IR-NEXT: s_sext_i32_i16 s9, s9 +; GCN-IR-NEXT: s_lshr_b64 s[4:5], s[8:9], 24 +; GCN-IR-NEXT: v_cvt_f32_i32_e32 v0, s4 ; GCN-IR-NEXT: s_sext_i32_i16 s3, s3 -; GCN-IR-NEXT: s_sext_i32_i16 s5, s5 -; GCN-IR-NEXT: v_mov_b32_e32 v0, s4 -; GCN-IR-NEXT: v_alignbit_b32 v0, s5, v0, 24 -; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, v0 -; GCN-IR-NEXT: v_mov_b32_e32 v2, s2 -; GCN-IR-NEXT: v_alignbit_b32 v2, s3, v2, 24 -; GCN-IR-NEXT: v_cvt_f32_i32_e32 v3, v2 -; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v4, v1 -; GCN-IR-NEXT: v_xor_b32_e32 v5, v2, v0 -; GCN-IR-NEXT: v_ashrrev_i32_e32 v5, 30, v5 -; GCN-IR-NEXT: v_or_b32_e32 v5, 1, v5 -; GCN-IR-NEXT: v_mul_f32_e32 v4, v3, v4 -; GCN-IR-NEXT: v_trunc_f32_e32 v4, v4 -; GCN-IR-NEXT: v_mad_f32 v3, -v4, v1, v3 -; GCN-IR-NEXT: v_cvt_i32_f32_e32 v4, v4 -; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1| -; GCN-IR-NEXT: v_cndmask_b32_e32 v1, 0, v5, vcc +; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, s2 +; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v2, v0 +; GCN-IR-NEXT: s_xor_b32 s3, s2, s4 +; GCN-IR-NEXT: s_ashr_i32 s3, s3, 30 +; GCN-IR-NEXT: s_or_b32 s3, s3, 1 +; GCN-IR-NEXT: v_mul_f32_e32 v2, v1, v2 +; GCN-IR-NEXT: v_trunc_f32_e32 v2, v2 +; GCN-IR-NEXT: v_mad_f32 v1, -v2, v0, v1 +; GCN-IR-NEXT: v_cvt_i32_f32_e32 v2, v2 +; GCN-IR-NEXT: v_cmp_ge_f32_e64 s[8:9], |v1|, |v0| +; GCN-IR-NEXT: s_and_b64 s[8:9], 
s[8:9], exec +; GCN-IR-NEXT: s_cselect_b32 s3, s3, 0 +; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s3, v2 +; GCN-IR-NEXT: v_mul_lo_u32 v0, v0, s4 ; GCN-IR-NEXT: s_mov_b32 s4, s0 -; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v4 -; GCN-IR-NEXT: v_mul_lo_u32 v0, v1, v0 ; GCN-IR-NEXT: s_mov_b32 s5, s1 -; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, v0, v2 +; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, s2, v0 ; GCN-IR-NEXT: v_bfe_i32 v0, v0, 0, 24 ; GCN-IR-NEXT: v_ashrrev_i32_e32 v1, 31, v0 ; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0 diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll index dc11e81476a7e..1ed04f8782d5d 100644 --- a/llvm/test/CodeGen/AMDGPU/udiv64.ll +++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll @@ -721,16 +721,14 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48 ; GCN-NEXT: s_mov_b32 s6, -1 ; GCN-NEXT: v_mov_b32_e32 v3, 0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_and_b32 s2, s2, 0xff000000 -; GCN-NEXT: s_and_b32 s4, s4, 0xff000000 -; GCN-NEXT: s_and_b32 s5, s5, 0xffff -; GCN-NEXT: v_mov_b32_e32 v0, s4 -; GCN-NEXT: v_alignbit_b32 v0, s5, v0, 24 -; GCN-NEXT: v_cvt_f32_u32_e32 v0, v0 ; GCN-NEXT: s_and_b32 s3, s3, 0xffff -; GCN-NEXT: v_mov_b32_e32 v1, s2 -; GCN-NEXT: v_alignbit_b32 v1, s3, v1, 24 -; GCN-NEXT: v_cvt_f32_u32_e32 v1, v1 +; GCN-NEXT: s_and_b32 s5, s5, 0xffff +; GCN-NEXT: s_and_b32 s4, s4, 0xff000000 +; GCN-NEXT: s_lshr_b64 s[4:5], s[4:5], 24 +; GCN-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GCN-NEXT: s_and_b32 s2, s2, 0xff000000 +; GCN-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-NEXT: v_cvt_f32_u32_e32 v1, s2 ; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v0 ; GCN-NEXT: s_mov_b32 s4, s0 ; GCN-NEXT: s_mov_b32 s5, s1 @@ -753,16 +751,14 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48 ; GCN-IR-NEXT: s_mov_b32 s6, -1 ; GCN-IR-NEXT: v_mov_b32_e32 v3, 0 ; GCN-IR-NEXT: s_waitcnt lgkmcnt(0) -; GCN-IR-NEXT: s_and_b32 s2, s2, 0xff000000 -; GCN-IR-NEXT: s_and_b32 s4, s4, 0xff000000 -; 
GCN-IR-NEXT: s_and_b32 s5, s5, 0xffff -; GCN-IR-NEXT: v_mov_b32_e32 v0, s4 -; GCN-IR-NEXT: v_alignbit_b32 v0, s5, v0, 24 -; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, v0 ; GCN-IR-NEXT: s_and_b32 s3, s3, 0xffff -; GCN-IR-NEXT: v_mov_b32_e32 v1, s2 -; GCN-IR-NEXT: v_alignbit_b32 v1, s3, v1, 24 -; GCN-IR-NEXT: v_cvt_f32_u32_e32 v1, v1 +; GCN-IR-NEXT: s_and_b32 s5, s5, 0xffff +; GCN-IR-NEXT: s_and_b32 s4, s4, 0xff000000 +; GCN-IR-NEXT: s_lshr_b64 s[4:5], s[4:5], 24 +; GCN-IR-NEXT: v_cvt_f32_u32_e32 v0, s4 +; GCN-IR-NEXT: s_and_b32 s2, s2, 0xff000000 +; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24 +; GCN-IR-NEXT: v_cvt_f32_u32_e32 v1, s2 ; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v2, v0 ; GCN-IR-NEXT: s_mov_b32 s4, s0 ; GCN-IR-NEXT: s_mov_b32 s5, s1 diff --git a/llvm/test/CodeGen/AMDGPU/uniform-alignbit.ll b/llvm/test/CodeGen/AMDGPU/uniform-alignbit.ll new file mode 100644 index 0000000000000..fe8c90ee7b686 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/uniform-alignbit.ll @@ -0,0 +1,38 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 < %s | FileCheck -check-prefixes=GCN %s + +define amdgpu_kernel void @uniform_build_vector(i64 %in, ptr addrspace(1) %out) { +; GCN-LABEL: uniform_build_vector: +; GCN: ; %bb.0: ; %entry +; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_lshr_b64 s[4:5], s[0:1], 1 +; GCN-NEXT: s_mov_b32 s5, 0 +; GCN-NEXT: s_mov_b32 s6, s5 +; GCN-NEXT: s_mov_b32 s7, s5 +; GCN-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0 +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_mov_b32_e32 v1, 0 +; GCN-NEXT: ; sched_barrier mask(0x00000000) +; GCN-NEXT: v_and_b32_e32 v0, 1, v0 +; GCN-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; GCN-NEXT: global_store_dword v1, v0, s[2:3] +; GCN-NEXT: s_endpgm +entry: + %shifted = lshr i64 %in, 1 + %trunc = trunc i64 %shifted to i32 + %insert = insertelement <4 x i32> zeroinitializer, i32 %trunc, i64 0 + %load 
= tail call <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32> %insert, i32 0, i32 0, i32 0) + tail call void @llvm.amdgcn.sched.barrier(i32 0) + %extract = extractelement <4 x i32> %load, i64 0 + %and = and i32 %extract, 1 + %convert = sitofp i32 %and to float + store float %convert, ptr addrspace(1) %out + ret void +} + +; Function Attrs: convergent nocallback nofree nounwind willreturn +declare void @llvm.amdgcn.sched.barrier(i32 immarg) #0 + +; Function Attrs: nocallback nofree nosync nounwind willreturn memory(read) +declare <4 x i32> @llvm.amdgcn.raw.buffer.load.v4i32(<4 x i32>, i32, i32, i32 immarg) #1 \ No newline at end of file diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll index dc25caadb99a9..b846ce7f12466 100644 --- a/llvm/test/CodeGen/AMDGPU/urem64.ll +++ b/llvm/test/CodeGen/AMDGPU/urem64.ll @@ -1033,30 +1033,30 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x) ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mul_hi_u32 v3, s2, v2 ; GCN-NEXT: v_mul_hi_u32 v2, s3, v2 -; GCN-NEXT: v_mul_hi_u32 v1, s2, v0 ; GCN-NEXT: s_mul_i32 s5, s3, 0xaaaaaaab +; GCN-NEXT: v_mul_hi_u32 v1, s2, v0 ; GCN-NEXT: v_add_i32_e32 v3, vcc, s5, v3 ; GCN-NEXT: s_mov_b32 s4, s0 ; GCN-NEXT: s_mul_i32 s0, s2, 0xaaaaaaaa ; GCN-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc ; GCN-NEXT: v_add_i32_e32 v3, vcc, s0, v3 -; GCN-NEXT: v_mul_hi_u32 v0, s3, v0 +; GCN-NEXT: v_mul_hi_u32 v3, s3, v0 ; GCN-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; GCN-NEXT: v_add_i32_e32 v1, vcc, v2, v1 ; GCN-NEXT: s_mul_i32 s0, s3, 0xaaaaaaaa ; GCN-NEXT: v_addc_u32_e64 v2, s[8:9], 0, 0, vcc -; GCN-NEXT: v_add_i32_e32 v1, vcc, s0, v1 -; GCN-NEXT: v_addc_u32_e32 v0, vcc, v0, v2, vcc -; GCN-NEXT: v_alignbit_b32 v1, v0, v1, 4 -; GCN-NEXT: v_lshrrev_b32_e32 v0, 4, v0 -; GCN-NEXT: v_mul_hi_u32 v2, v1, 24 +; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v1 +; GCN-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc +; GCN-NEXT: v_lshrrev_b32_e32 v2, 4, v1 +; GCN-NEXT: 
v_lshr_b64 v[0:1], v[0:1], 4 +; GCN-NEXT: v_mul_lo_u32 v1, v2, 24 +; GCN-NEXT: v_mul_hi_u32 v2, v0, 24 ; GCN-NEXT: v_mul_lo_u32 v0, v0, 24 -; GCN-NEXT: v_mul_lo_u32 v1, v1, 24 -; GCN-NEXT: v_mov_b32_e32 v3, s3 ; GCN-NEXT: s_mov_b32 s5, s1 -; GCN-NEXT: v_add_i32_e32 v2, vcc, v0, v2 -; GCN-NEXT: v_sub_i32_e32 v0, vcc, s2, v1 -; GCN-NEXT: v_subb_u32_e32 v1, vcc, v3, v2, vcc +; GCN-NEXT: v_add_i32_e32 v1, vcc, v1, v2 +; GCN-NEXT: v_mov_b32_e32 v2, s3 +; GCN-NEXT: v_sub_i32_e32 v0, vcc, s2, v0 +; GCN-NEXT: v_subb_u32_e32 v1, vcc, v2, v1, vcc ; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GCN-NEXT: s_endpgm ; diff --git a/llvm/test/CodeGen/AMDGPU/v_mac.ll b/llvm/test/CodeGen/AMDGPU/v_mac.ll index c12871536bafa..f5dc824aae35f 100644 --- a/llvm/test/CodeGen/AMDGPU/v_mac.ll +++ b/llvm/test/CodeGen/AMDGPU/v_mac.ll @@ -116,7 +116,7 @@ entry: ; GCN-LABEL: {{^}}nsz_mad_sub0_src0: ; GCN-NOT: v_mac_f32 ; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}} -define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 { +define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) { entry: %b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1 %c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2 @@ -125,7 +125,7 @@ entry: %b = load float, ptr addrspace(1) %b_ptr %c = load float, ptr addrspace(1) %c_ptr - %neg_a = fsub float 0.0, %a + %neg_a = fsub nsz float 0.0, %a %tmp0 = fmul float %neg_a, %b %tmp1 = fadd float %tmp0, %c @@ -176,7 +176,7 @@ entry: ; GCN-LABEL: {{^}}nsz_mad_sub0_src1: ; GCN-NOT: v_mac_f32 ; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}} -define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 { +define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) { entry: %b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1 %c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2 @@ 
-185,7 +185,7 @@ entry: %b = load float, ptr addrspace(1) %b_ptr %c = load float, ptr addrspace(1) %c_ptr - %neg_b = fsub float 0.0, %b + %neg_b = fsub nsz float 0.0, %b %tmp0 = fmul float %a, %neg_b %tmp1 = fadd float %tmp0, %c @@ -310,6 +310,5 @@ define float @v_mac_f32_dynamic_ftz(float %a, float %b, float %c) "denormal-fp-m declare i32 @llvm.amdgcn.workitem.id.x() #2 attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" } -attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" } attributes #2 = { nounwind readnone } attributes #3 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll index bcc60b06db291..8da6f2348690a 100644 --- a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll +++ b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll @@ -236,7 +236,7 @@ entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %a.neg = fsub half 0.0, %a.val + %a.neg = fsub nsz half 0.0, %a.val %t.val = fmul half %a.neg, %b.val %r.val = fadd half %t.val, %c.val @@ -263,7 +263,7 @@ entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %b.neg = fsub half 0.0, %b.val + %b.neg = fsub nsz half 0.0, %b.val %t.val = fmul half %a.val, %b.neg %r.val = fadd half %t.val, %c.val @@ -290,7 +290,7 @@ entry: %b.val = load half, ptr addrspace(1) %b %c.val = load half, ptr addrspace(1) %c - %c.neg = fsub half 0.0, %c.val + %c.neg = fsub nsz half 0.0, %c.val %t.val = fmul half %a.val, %b.val %r.val = fadd half %t.val, %c.neg @@ -601,7 +601,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %a.neg = fsub <2 x half> , %a.val + %a.neg = fsub nsz <2 x half> , %a.val %t.val = fmul <2 x half> %a.neg, %b.val %r.val = fadd <2 x half> %t.val, %c.val @@ -634,7 +634,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %b.neg = fsub <2 x half> , %b.val + %b.neg = fsub nsz <2 x half> , %b.val %t.val 
= fmul <2 x half> %a.val, %b.neg %r.val = fadd <2 x half> %t.val, %c.val @@ -667,7 +667,7 @@ entry: %b.val = load <2 x half>, ptr addrspace(1) %b %c.val = load <2 x half>, ptr addrspace(1) %c - %c.neg = fsub <2 x half> , %c.val + %c.neg = fsub nsz <2 x half> , %c.val %t.val = fmul <2 x half> %a.val, %b.val %r.val = fadd <2 x half> %t.val, %c.neg @@ -678,5 +678,5 @@ entry: declare void @llvm.amdgcn.s.barrier() #2 attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" } -attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" "denormal-fp-math"="preserve-sign,preserve-sign" } +attributes #1 = { nounwind "denormal-fp-math"="preserve-sign,preserve-sign" } attributes #2 = { nounwind convergent } diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-vmcnt-loop.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-vmcnt-loop.mir index 0ddd2aa285b26..0d54bfaed8130 100644 --- a/llvm/test/CodeGen/AMDGPU/waitcnt-vmcnt-loop.mir +++ b/llvm/test/CodeGen/AMDGPU/waitcnt-vmcnt-loop.mir @@ -1,4 +1,5 @@ # RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefix=GFX9 %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s -debugify-and-strip-all-safe | FileCheck -check-prefix=GFX9 %s # RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefix=GFX10 %s # RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefix=GFX12 %s diff --git a/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll b/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll index 484ad93bebeab..0e8d47347286b 100644 --- a/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll +++ b/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 -enable-unsafe-fp-math < %s +; RUN: llc -mtriple=armv7-eabi 
-mcpu=cortex-a8 < %s ; PR5367 define arm_aapcs_vfpcc void @_Z27Benchmark_SceDualQuaternionPvm(ptr nocapture %pBuffer, i32 %numItems) nounwind { diff --git a/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll b/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll index 80c1968c85743..593fb9348506b 100644 --- a/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll +++ b/llvm/test/CodeGen/ARM/2012-04-10-DAGCombine.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 -enable-unsafe-fp-math %s -o /dev/null +; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o /dev/null ;target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64" ;target triple = "armv7-none-linux-gnueabi" diff --git a/llvm/test/CodeGen/ARM/build-attributes-fn-attr3.ll b/llvm/test/CodeGen/ARM/build-attributes-fn-attr3.ll index 7f70c44c78f9c..27d1dc20bd815 100644 --- a/llvm/test/CodeGen/ARM/build-attributes-fn-attr3.ll +++ b/llvm/test/CodeGen/ARM/build-attributes-fn-attr3.ll @@ -11,7 +11,10 @@ define i32 @foo() local_unnamed_addr #0 { entry: + %a = call float @llvm.fma.f32(float 0.0, float 0.0, float 0.0) ret i32 42 } +declare float @llvm.fma.f32(float, float, float) + attributes #0 = { minsize norecurse nounwind optsize readnone "no-trapping-math"="true" "denormal-fp-math"="ieee"} diff --git a/llvm/test/CodeGen/ARM/build-attributes-fn-attr4.ll b/llvm/test/CodeGen/ARM/build-attributes-fn-attr4.ll index c99cb27adf155..9c8dd8d95c61c 100644 --- a/llvm/test/CodeGen/ARM/build-attributes-fn-attr4.ll +++ b/llvm/test/CodeGen/ARM/build-attributes-fn-attr4.ll @@ -10,7 +10,10 @@ define i32 @foo1() local_unnamed_addr #0 { entry: + %a = call float @llvm.fma.f32(float 0.0, float 0.0, float 0.0) ret i32 42 } +declare float @llvm.fma.f32(float, float, float) + attributes #0 = { minsize norecurse nounwind optsize readnone "denormal-fp-math"="positive-zero,positive-zero" } diff --git a/llvm/test/CodeGen/ARM/build-attributes-fn-attr5.ll 
b/llvm/test/CodeGen/ARM/build-attributes-fn-attr5.ll index ba1e7d7ce55c1..cda3ea0fc6d18 100644 --- a/llvm/test/CodeGen/ARM/build-attributes-fn-attr5.ll +++ b/llvm/test/CodeGen/ARM/build-attributes-fn-attr5.ll @@ -10,7 +10,10 @@ define i32 @foo1() local_unnamed_addr #0 { entry: + %a = call float @llvm.fma.f32(float 0.0, float 0.0, float 0.0) ret i32 42 } +declare float @llvm.fma.f32(float, float, float) + attributes #0 = { minsize norecurse nounwind optsize readnone "denormal-fp-math"="preserve-sign,preserve-sign"} diff --git a/llvm/test/CodeGen/ARM/build-attributes-fn-attr6.ll b/llvm/test/CodeGen/ARM/build-attributes-fn-attr6.ll index 1cd68aed1e051..59d0a40198392 100644 --- a/llvm/test/CodeGen/ARM/build-attributes-fn-attr6.ll +++ b/llvm/test/CodeGen/ARM/build-attributes-fn-attr6.ll @@ -11,6 +11,7 @@ define i32 @foo1() local_unnamed_addr #0 { entry: + %a = call float @llvm.fma.f32(float 0.0, float 0.0, float 0.0) ret i32 42 } @@ -19,5 +20,7 @@ entry: ret i32 42 } +declare float @llvm.fma.f32(float, float, float) + attributes #0 = { minsize norecurse nounwind optsize readnone "denormal-fp-math"="preserve-sign,preserve-sign"} attributes #1 = { minsize norecurse nounwind optsize readnone "denormal-fp-math"="positive-zero,positive-zero"} diff --git a/llvm/test/CodeGen/ARM/build-attributes.ll b/llvm/test/CodeGen/ARM/build-attributes.ll index 68844aed03630..306a4a31b79fa 100644 --- a/llvm/test/CodeGen/ARM/build-attributes.ll +++ b/llvm/test/CodeGen/ARM/build-attributes.ll @@ -3,23 +3,16 @@ ; RUN: llc < %s -mtriple=thumbv5-linux-gnueabi -mcpu=xscale -mattr=+strict-align | FileCheck %s --check-prefix=XSCALE ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6 -; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi 
-mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST ; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M -; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align | FileCheck %s --check-prefix=ARM1156T2F-S -; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=ARM1156T2F-S-FAST ; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefix=V7M -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7M-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all 
-enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V8-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING @@ -31,35 +24,24 @@ ; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi | FileCheck %s --check-prefix=V8MMAINLINE ; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi -mattr=+dsp | FileCheck %s --check-prefix=V8MMAINLINE_DSP ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-neon,-d32 | FileCheck %s --check-prefix=CORTEX-A5-NONEON ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A5-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s 
--check-prefix=CORTEX-A8-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-SOFT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A8-HARD -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-HARD-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-SOFT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A9-HARD -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-HARD-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT -; RUN: llc < %s 
-mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A12-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15 -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A15-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 | FileCheck %s --check-prefix=CORTEX-A17-DEFAULT -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-A17-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-no-trapping-fp-math | FileCheck %s 
--check-prefix=NO-TRAPPING-MATH ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -denormal-fp-math=ieee | FileCheck %s --check-prefix=DENORMAL-IEEE @@ -74,37 +56,26 @@ ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus | FileCheck %s --check-prefix=CORTEX-M0PLUS -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0PLUS-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 | FileCheck %s --check-prefix=CORTEX-M1 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M1-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align | FileCheck %s --check-prefix=SC000 -; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align 
-enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC000-FAST ; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 | FileCheck %s --check-prefix=CORTEX-M3 -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M3-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 | FileCheck %s --check-prefix=SC300 -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC300-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-SOFT-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-M4-HARD -; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-HARD-FAST ; RUN: llc < %s 
-mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2sp | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SOFT -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-fp64 | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SINGLE -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-fp64 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 | FileCheck %s --check-prefix=CORTEX-M7-DOUBLE ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m23 | FileCheck %s --check-prefix=CORTEX-M23 ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 | FileCheck %s --check-prefix=CORTEX-M33 -; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M33-FAST ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m35p | FileCheck %s --check-prefix=CORTEX-M35P @@ -113,49 +84,34 @@ ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4 | FileCheck %s --check-prefix=CORTEX-R4 ; RUN: llc < %s 
-mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4f | FileCheck %s --check-prefix=CORTEX-R4F ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R5-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 | FileCheck %s --check-prefix=CORTEX-R7 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R7-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 | FileCheck %s --check-prefix=CORTEX-R8 -; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R8-FAST ; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 | FileCheck %s --check-prefix=CORTEX-A32 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A32-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi 
-mcpu=cortex-a35 | FileCheck %s --check-prefix=CORTEX-A35 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A35-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 | FileCheck %s --check-prefix=CORTEX-A53 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A53-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=CORTEX-A57 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A57-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 | FileCheck %s --check-prefix=CORTEX-A72 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A72-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a73 | FileCheck %s --check-prefix=CORTEX-A73 ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi | FileCheck %s 
--check-prefix=GENERIC-ARMV8_1-A ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 | FileCheck %s --check-prefix=EXYNOS-M3 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 | FileCheck %s --check-prefix=EXYNOS-M4 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 | FileCheck %s --check-prefix=EXYNOS-M5 -; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-FAST ; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A-FAST ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 | FileCheck %s --check-prefix=CORTEX-A7-CHECK -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math 
-enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-CHECK-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2sp,-vfp3,-vfp4,-neon,-fp16 | FileCheck %s --check-prefix=CORTEX-A7-NOFPU -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2sp,-vfp3,-vfp4,-neon,-fp16 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-FPUV4-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,,-d32,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=pic | FileCheck %s --check-prefix=RELOC-PIC ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=static | FileCheck %s --check-prefix=RELOC-OTHER @@ -278,15 +234,6 @@ ; V6-NOT: .eabi_attribute 28 ; V6: .eabi_attribute 38, 1 -; V6-FAST-NOT: .eabi_attribute 19 -;; Despite the V6 CPU having no FPU by default, we chose to flush to -;; positive zero here. There's no hardware support doing this, but the -;; fast maths software library might. 
-; V6-FAST-NOT: .eabi_attribute 20 -; V6-FAST-NOT: .eabi_attribute 21 -; V6-FAST-NOT: .eabi_attribute 22 -; V6-FAST: .eabi_attribute 23, 1 - ;; We emit 6, 12 for both v6-M and v6S-M, technically this is incorrect for ;; V6-M, however we don't model the OS extension so this is fine. ; V6M: .eabi_attribute 6, 12 @@ -312,14 +259,6 @@ ; V6M-NOT: .eabi_attribute 28 ; V6M: .eabi_attribute 38, 1 -; V6M-FAST-NOT: .eabi_attribute 19 -;; Despite the V6M CPU having no FPU by default, we chose to flush to -;; positive zero here. There's no hardware support doing this, but the -;; fast maths software library might. -; V6M-FAST-NOT: .eabi_attribute 20 -; V6M-FAST-NOT: .eabi_attribute 21 -; V6M-FAST-NOT: .eabi_attribute 22 -; V6M-FAST: .eabi_attribute 23, 1 ; ARM1156T2F-S: .cpu arm1156t2f-s ; ARM1156T2F-S: .eabi_attribute 6, 8 @@ -342,14 +281,6 @@ ; ARM1156T2F-S-NOT: .eabi_attribute 28 ; ARM1156T2F-S: .eabi_attribute 38, 1 -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 19 -;; V6 cores default to flush to positive zero (value 0). Note that value 2 is also equally -;; valid for this core, it's an implementation defined question as to which of 0 and 2 you -;; select. LLVM historically picks 0. -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 20 -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 21 -; ARM1156T2F-S-FAST-NOT: .eabi_attribute 22 -; ARM1156T2F-S-FAST: .eabi_attribute 23, 1 ; V7M: .eabi_attribute 6, 10 ; V7M: .eabi_attribute 7, 77 @@ -374,15 +305,6 @@ ; V7M-NOT: .eabi_attribute 28 ; V7M: .eabi_attribute 38, 1 -; V7M-FAST-NOT: .eabi_attribute 19 -;; Despite the V7M CPU having no FPU by default, we chose to flush -;; preserving sign. This matches what the hardware would do in the -;; architecture revision were to exist on the current target. 
-; V7M-FAST: .eabi_attribute 20, 2 -; V7M-FAST-NOT: .eabi_attribute 21 -; V7M-FAST-NOT: .eabi_attribute 22 -; V7M-FAST: .eabi_attribute 23, 1 - ; V7: .syntax unified ; V7: .eabi_attribute 6, 10 ; V7-NOT: .eabi_attribute 27 @@ -401,13 +323,6 @@ ; V7-NOT: .eabi_attribute 28 ; V7: .eabi_attribute 38, 1 -; V7-FAST-NOT: .eabi_attribute 19 -;; The default CPU does have an FPU and it must be VFPv3 or better, so it flushes -;; denormals to zero preserving the sign. -; V7-FAST: .eabi_attribute 20, 2 -; V7-FAST-NOT: .eabi_attribute 21 -; V7-FAST-NOT: .eabi_attribute 22 -; V7-FAST: .eabi_attribute 23, 1 ; V7VE: .syntax unified ; V7VE: .eabi_attribute 6, 10 @ Tag_CPU_arch @@ -435,12 +350,6 @@ ; V8-NOT: .eabi_attribute 22 ; V8: .eabi_attribute 23, 3 -; V8-FAST-NOT: .eabi_attribute 19 -;; The default does have an FPU, and for V8-A, it flushes preserving sign. -; V8-FAST: .eabi_attribute 20, 2 -; V8-FAST-NOT: .eabi_attribute 21 -; V8-FAST-NOT: .eabi_attribute 22 -; V8-FAST: .eabi_attribute 23, 1 ; Vt8: .syntax unified ; Vt8: .eabi_attribute 6, 14 @@ -552,15 +461,11 @@ ;; We default to IEEE 754 compliance ; CORTEX-A7-CHECK: .eabi_attribute 20, 1 ;; The A7 has VFPv3 support by default, so flush preserving sign. -; CORTEX-A7-CHECK-FAST: .eabi_attribute 20, 2 ; CORTEX-A7-NOFPU: .eabi_attribute 20, 1 ;; Despite there being no FPU, we chose to flush to zero preserving ;; sign. This matches what the hardware would do for this architecture ;; revision. -; CORTEX-A7-NOFPU-FAST: .eabi_attribute 20, 2 ; CORTEX-A7-FPUV4: .eabi_attribute 20, 1 -;; The VFPv4 FPU flushes preserving sign. -; CORTEX-A7-FPUV4-FAST: .eabi_attribute 20, 2 ; Tag_ABI_FP_exceptions ; CORTEX-A7-CHECK: .eabi_attribute 21, 1 @@ -610,13 +515,6 @@ ; CORTEX-A5-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A5-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 19 -;; The A5 defaults to a VFPv4 FPU, so it flushed preserving the sign when -ffast-math -;; is given. 
-; CORTEX-A5-DEFAULT-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-DEFAULT-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-DEFAULT-FAST: .eabi_attribute 23, 1 ; CORTEX-A5-NONEON: .cpu cortex-a5 ; CORTEX-A5-NONEON: .eabi_attribute 6, 10 @@ -634,13 +532,6 @@ ; CORTEX-A5-NONEON: .eabi_attribute 24, 1 ; CORTEX-A5-NONEON: .eabi_attribute 25, 1 -; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 19 -;; The A5 defaults to a VFPv4 FPU, so it flushed preserving sign when -ffast-math -;; is given. -; CORTEX-A5-NONEON-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-NONEON-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-NONEON-FAST: .eabi_attribute 23, 1 ; CORTEX-A5-NOFPU: .cpu cortex-a5 ; CORTEX-A5-NOFPU: .eabi_attribute 6, 10 @@ -659,14 +550,9 @@ ; CORTEX-A5-NOFPU: .eabi_attribute 24, 1 ; CORTEX-A5-NOFPU: .eabi_attribute 25, 1 -; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 19 ;; Despite there being no FPU, we chose to flush to zero preserving ;; sign. This matches what the hardware would do for this architecture ;; revision. -; CORTEX-A5-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-NOFPU-FAST: .eabi_attribute 23, 1 ; CORTEX-A8-SOFT: .cpu cortex-a8 ; CORTEX-A8-SOFT: .eabi_attribute 6, 10 @@ -712,15 +598,6 @@ ; CORTEX-A9-SOFT-NOT: .eabi_attribute 28 ; CORTEX-A9-SOFT: .eabi_attribute 38, 1 -; CORTEX-A8-SOFT-FAST-NOT: .eabi_attribute 19 -; CORTEX-A9-SOFT-FAST-NOT: .eabi_attribute 19 -;; The A9 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. 
-; CORTEX-A8-SOFT-FAST: .eabi_attribute 20, 2 -; CORTEX-A9-SOFT-FAST: .eabi_attribute 20, 2 -; CORTEX-A5-SOFT-FAST-NOT: .eabi_attribute 21 -; CORTEX-A5-SOFT-FAST-NOT: .eabi_attribute 22 -; CORTEX-A5-SOFT-FAST: .eabi_attribute 23, 1 ; CORTEX-A8-HARD: .cpu cortex-a8 ; CORTEX-A8-HARD: .eabi_attribute 6, 10 @@ -766,21 +643,6 @@ ; CORTEX-A9-HARD: .eabi_attribute 28, 1 ; CORTEX-A9-HARD: .eabi_attribute 38, 1 -; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 19 -;; The A8 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A8-HARD-FAST: .eabi_attribute 20, 2 -; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-A8-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-A8-HARD-FAST: .eabi_attribute 23, 1 - -; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 19 -;; The A9 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A9-HARD-FAST: .eabi_attribute 20, 2 -; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-A9-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-A9-HARD-FAST: .eabi_attribute 23, 1 ; CORTEX-A12-DEFAULT: .cpu cortex-a12 ; CORTEX-A12-DEFAULT: .eabi_attribute 6, 10 @@ -800,13 +662,6 @@ ; CORTEX-A12-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A12-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A12-DEFAULT-FAST-NOT: .eabi_attribute 19 -;; The A12 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A12-DEFAULT-FAST: .eabi_attribute 20, 2 -; CORTEX-A12-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-A12-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-A12-HARD-FAST: .eabi_attribute 23, 1 ; CORTEX-A12-NOFPU: .cpu cortex-a12 ; CORTEX-A12-NOFPU: .eabi_attribute 6, 10 @@ -826,14 +681,6 @@ ; CORTEX-A12-NOFPU: .eabi_attribute 24, 1 ; CORTEX-A12-NOFPU: .eabi_attribute 25, 1 -; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. 
This matches what the hardware would do for this architecture -;; revision. -; CORTEX-A12-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-A12-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-A12-NOFPU-FAST: .eabi_attribute 23, 1 ; CORTEX-A15: .cpu cortex-a15 ; CORTEX-A15: .eabi_attribute 6, 10 @@ -857,13 +704,6 @@ ; CORTEX-A15-NOT: .eabi_attribute 28 ; CORTEX-A15: .eabi_attribute 38, 1 -; CORTEX-A15-FAST-NOT: .eabi_attribute 19 -;; The A15 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A15-FAST: .eabi_attribute 20, 2 -; CORTEX-A15-FAST-NOT: .eabi_attribute 21 -; CORTEX-A15-FAST-NOT: .eabi_attribute 22 -; CORTEX-A15-FAST: .eabi_attribute 23, 1 ; CORTEX-A17-DEFAULT: .cpu cortex-a17 ; CORTEX-A17-DEFAULT: .eabi_attribute 6, 10 @@ -883,13 +723,6 @@ ; CORTEX-A17-DEFAULT: .eabi_attribute 24, 1 ; CORTEX-A17-DEFAULT: .eabi_attribute 25, 1 -; CORTEX-A17-FAST-NOT: .eabi_attribute 19 -;; The A17 defaults to a VFPv3 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-A17-FAST: .eabi_attribute 20, 2 -; CORTEX-A17-FAST-NOT: .eabi_attribute 21 -; CORTEX-A17-FAST-NOT: .eabi_attribute 22 -; CORTEX-A17-FAST: .eabi_attribute 23, 1 ; CORTEX-A17-NOFPU: .cpu cortex-a17 ; CORTEX-A17-NOFPU: .eabi_attribute 6, 10 @@ -910,13 +743,6 @@ ; CORTEX-A17-NOFPU: .eabi_attribute 25, 1 ; CORTEX-A17-NOFPU-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. 
-; CORTEX-A17-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-A17-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-A17-NOFPU-FAST: .eabi_attribute 23, 1 ; Test flags -enable-no-trapping-fp-math and -denormal-fp-math: ; NO-TRAPPING-MATH: .eabi_attribute 21, 0 @@ -946,16 +772,6 @@ ; CORTEX-M0-NOT: .eabi_attribute 28 ; CORTEX-M0: .eabi_attribute 38, 1 -; CORTEX-M0-FAST-NOT: .eabi_attribute 19 -;; Despite the M0 CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. -; CORTEX-M0-FAST-NOT: .eabi_attribute 20 -; CORTEX-M0-FAST-NOT: .eabi_attribute 21 -; CORTEX-M0-FAST-NOT: .eabi_attribute 22 -; CORTEX-M0-FAST: .eabi_attribute 23, 1 ; CORTEX-M0PLUS: .cpu cortex-m0plus ; CORTEX-M0PLUS: .eabi_attribute 6, 12 @@ -978,16 +794,6 @@ ; CORTEX-M0PLUS-NOT: .eabi_attribute 28 ; CORTEX-M0PLUS: .eabi_attribute 38, 1 -; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 19 -;; Despite the M0+ CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. -; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 20 -; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 21 -; CORTEX-M0PLUS-FAST-NOT: .eabi_attribute 22 -; CORTEX-M0PLUS-FAST: .eabi_attribute 23, 1 ; CORTEX-M1: .cpu cortex-m1 ; CORTEX-M1: .eabi_attribute 6, 12 @@ -1010,16 +816,6 @@ ; CORTEX-M1-NOT: .eabi_attribute 28 ; CORTEX-M1: .eabi_attribute 38, 1 -; CORTEX-M1-FAST-NOT: .eabi_attribute 19 -;; Despite the M1 CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. 
There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. -; CORTEX-M1-FAST-NOT: .eabi_attribute 20 -; CORTEX-M1-FAST-NOT: .eabi_attribute 21 -; CORTEX-M1-FAST-NOT: .eabi_attribute 22 -; CORTEX-M1-FAST: .eabi_attribute 23, 1 ; SC000: .cpu sc000 ; SC000: .eabi_attribute 6, 12 @@ -1041,16 +837,6 @@ ; SC000-NOT: .eabi_attribute 28 ; SC000: .eabi_attribute 38, 1 -; SC000-FAST-NOT: .eabi_attribute 19 -;; Despite the SC000 CPU having no FPU in this scenario, we chose to -;; flush to positive zero here. There's no hardware support doing -;; this, but the fast maths software library might and such behaviour -;; would match hardware support on this architecture revision if it -;; existed. -; SC000-FAST-NOT: .eabi_attribute 20 -; SC000-FAST-NOT: .eabi_attribute 21 -; SC000-FAST-NOT: .eabi_attribute 22 -; SC000-FAST: .eabi_attribute 23, 1 ; CORTEX-M3: .cpu cortex-m3 ; CORTEX-M3: .eabi_attribute 6, 10 @@ -1073,14 +859,6 @@ ; CORTEX-M3-NOT: .eabi_attribute 28 ; CORTEX-M3: .eabi_attribute 38, 1 -; CORTEX-M3-FAST-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. -; CORTEX-M3-FAST: .eabi_attribute 20, 2 -; CORTEX-M3-FAST-NOT: .eabi_attribute 21 -; CORTEX-M3-FAST-NOT: .eabi_attribute 22 -; CORTEX-M3-FAST: .eabi_attribute 23, 1 ; SC300: .cpu sc300 ; SC300: .eabi_attribute 6, 10 @@ -1103,14 +881,6 @@ ; SC300-NOT: .eabi_attribute 28 ; SC300: .eabi_attribute 38, 1 -; SC300-FAST-NOT: .eabi_attribute 19 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. 
-; SC300-FAST: .eabi_attribute 20, 2 -; SC300-FAST-NOT: .eabi_attribute 21 -; SC300-FAST-NOT: .eabi_attribute 22 -; SC300-FAST: .eabi_attribute 23, 1 ; CORTEX-M4-SOFT: .cpu cortex-m4 ; CORTEX-M4-SOFT: .eabi_attribute 6, 13 @@ -1134,13 +904,6 @@ ; CORTEX-M4-SOFT-NOT: .eabi_attribute 28 ; CORTEX-M4-SOFT: .eabi_attribute 38, 1 -; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 19 -;; The M4 defaults to a VFPv4 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-M4-SOFT-FAST: .eabi_attribute 20, 2 -; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 21 -; CORTEX-M4-SOFT-FAST-NOT: .eabi_attribute 22 -; CORTEX-M4-SOFT-FAST: .eabi_attribute 23, 1 ; CORTEX-M4-HARD: .cpu cortex-m4 ; CORTEX-M4-HARD: .eabi_attribute 6, 13 @@ -1164,13 +927,6 @@ ; CORTEX-M4-HARD: .eabi_attribute 28, 1 ; CORTEX-M4-HARD: .eabi_attribute 38, 1 -; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 19 -;; The M4 defaults to a VFPv4 FPU, so it flushes preserving the sign when -;; -ffast-math is specified. -; CORTEX-M4-HARD-FAST: .eabi_attribute 20, 2 -; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 21 -; CORTEX-M4-HARD-FAST-NOT: .eabi_attribute 22 -; CORTEX-M4-HARD-FAST: .eabi_attribute 23, 1 ; CORTEX-M7: .cpu cortex-m7 ; CORTEX-M7: .eabi_attribute 6, 13 @@ -1197,16 +953,6 @@ ; CORTEX-M7: .eabi_attribute 38, 1 ; CORTEX-M7: .eabi_attribute 14, 0 -; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 19 -;; The M7 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-M7-FAST: .eabi_attribute 20, 2 -;; Despite there being no FPU, we chose to flush to zero preserving -;; sign. This matches what the hardware would do for this architecture -;; revision. 
-; CORTEX-M7-NOFPU-FAST: .eabi_attribute 20, 2 -; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 21 -; CORTEX-M7-NOFPU-FAST-NOT: .eabi_attribute 22 -; CORTEX-M7-NOFPU-FAST: .eabi_attribute 23, 1 ; CORTEX-R4: .cpu cortex-r4 ; CORTEX-R4: .eabi_attribute 6, 10 @@ -1273,12 +1019,6 @@ ; CORTEX-R5-NOT: .eabi_attribute 28 ; CORTEX-R5: .eabi_attribute 38, 1 -; CORTEX-R5-FAST-NOT: .eabi_attribute 19 -;; The R5 has the VFPv3 FP unit, which always flushes preserving sign. -; CORTEX-R5-FAST: .eabi_attribute 20, 2 -; CORTEX-R5-FAST-NOT: .eabi_attribute 21 -; CORTEX-R5-FAST-NOT: .eabi_attribute 22 -; CORTEX-R5-FAST: .eabi_attribute 23, 1 ; CORTEX-R7: .cpu cortex-r7 ; CORTEX-R7: .eabi_attribute 6, 10 @@ -1301,12 +1041,6 @@ ; CORTEX-R7-NOT: .eabi_attribute 28 ; CORTEX-R7: .eabi_attribute 38, 1 -; CORTEX-R7-FAST-NOT: .eabi_attribute 19 -;; The R7 has the VFPv3 FP unit, which always flushes preserving sign. -; CORTEX-R7-FAST: .eabi_attribute 20, 2 -; CORTEX-R7-FAST-NOT: .eabi_attribute 21 -; CORTEX-R7-FAST-NOT: .eabi_attribute 22 -; CORTEX-R7-FAST: .eabi_attribute 23, 1 ; CORTEX-R8: .cpu cortex-r8 ; CORTEX-R8: .eabi_attribute 6, 10 @@ -1329,12 +1063,6 @@ ; CORTEX-R8-NOT: .eabi_attribute 28 ; CORTEX-R8: .eabi_attribute 38, 1 -; CORTEX-R8-FAST-NOT: .eabi_attribute 19 -;; The R8 has the VFPv3 FP unit, which always flushes preserving sign. -; CORTEX-R8-FAST: .eabi_attribute 20, 2 -; CORTEX-R8-FAST-NOT: .eabi_attribute 21 -; CORTEX-R8-FAST-NOT: .eabi_attribute 22 -; CORTEX-R8-FAST: .eabi_attribute 23, 1 ; CORTEX-A32: .cpu cortex-a32 ; CORTEX-A32: .eabi_attribute 6, 14 @@ -1359,12 +1087,6 @@ ; CORTEX-A32-NOT: .eabi_attribute 28 ; CORTEX-A32: .eabi_attribute 38, 1 -; CORTEX-A32-FAST-NOT: .eabi_attribute 19 -;; The A32 has the ARMv8 FP unit, which always flushes preserving sign. 
-; CORTEX-A32-FAST: .eabi_attribute 20, 2 -; CORTEX-A32-FAST-NOT: .eabi_attribute 21 -; CORTEX-A32-FAST-NOT: .eabi_attribute 22 -; CORTEX-A32-FAST: .eabi_attribute 23, 1 ; CORTEX-M23: .cpu cortex-m23 ; CORTEX-M23: .eabi_attribute 6, 16 @@ -1430,11 +1152,6 @@ ; CORTEX-M35P: .eabi_attribute 38, 1 ; CORTEX-M35P: .eabi_attribute 14, 0 -; CORTEX-M33-FAST-NOT: .eabi_attribute 19 -; CORTEX-M33-FAST: .eabi_attribute 20, 2 -; CORTEX-M33-FAST-NOT: .eabi_attribute 21 -; CORTEX-M33-FAST-NOT: .eabi_attribute 22 -; CORTEX-M33-FAST: .eabi_attribute 23, 1 ; CORTEX-A35: .cpu cortex-a35 ; CORTEX-A35: .eabi_attribute 6, 14 @@ -1459,12 +1176,6 @@ ; CORTEX-A35-NOT: .eabi_attribute 28 ; CORTEX-A35: .eabi_attribute 38, 1 -; CORTEX-A35-FAST-NOT: .eabi_attribute 19 -;; The A35 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A35-FAST: .eabi_attribute 20, 2 -; CORTEX-A35-FAST-NOT: .eabi_attribute 21 -; CORTEX-A35-FAST-NOT: .eabi_attribute 22 -; CORTEX-A35-FAST: .eabi_attribute 23, 1 ; CORTEX-A53: .cpu cortex-a53 ; CORTEX-A53: .eabi_attribute 6, 14 @@ -1489,12 +1200,6 @@ ; CORTEX-A53-NOT: .eabi_attribute 28 ; CORTEX-A53: .eabi_attribute 38, 1 -; CORTEX-A53-FAST-NOT: .eabi_attribute 19 -;; The A53 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A53-FAST: .eabi_attribute 20, 2 -; CORTEX-A53-FAST-NOT: .eabi_attribute 21 -; CORTEX-A53-FAST-NOT: .eabi_attribute 22 -; CORTEX-A53-FAST: .eabi_attribute 23, 1 ; CORTEX-A57: .cpu cortex-a57 ; CORTEX-A57: .eabi_attribute 6, 14 @@ -1519,12 +1224,6 @@ ; CORTEX-A57-NOT: .eabi_attribute 28 ; CORTEX-A57: .eabi_attribute 38, 1 -; CORTEX-A57-FAST-NOT: .eabi_attribute 19 -;; The A57 has the ARMv8 FP unit, which always flushes preserving sign. 
-; CORTEX-A57-FAST: .eabi_attribute 20, 2 -; CORTEX-A57-FAST-NOT: .eabi_attribute 21 -; CORTEX-A57-FAST-NOT: .eabi_attribute 22 -; CORTEX-A57-FAST: .eabi_attribute 23, 1 ; CORTEX-A72: .cpu cortex-a72 ; CORTEX-A72: .eabi_attribute 6, 14 @@ -1549,12 +1248,6 @@ ; CORTEX-A72-NOT: .eabi_attribute 28 ; CORTEX-A72: .eabi_attribute 38, 1 -; CORTEX-A72-FAST-NOT: .eabi_attribute 19 -;; The A72 has the ARMv8 FP unit, which always flushes preserving sign. -; CORTEX-A72-FAST: .eabi_attribute 20, 2 -; CORTEX-A72-FAST-NOT: .eabi_attribute 21 -; CORTEX-A72-FAST-NOT: .eabi_attribute 22 -; CORTEX-A72-FAST: .eabi_attribute 23, 1 ; CORTEX-A73: .cpu cortex-a73 ; CORTEX-A73: .eabi_attribute 6, 14 @@ -1580,12 +1273,6 @@ ; CORTEX-A73: .eabi_attribute 38, 1 ; CORTEX-A73: .eabi_attribute 14, 0 -; EXYNOS-FAST-NOT: .eabi_attribute 19 -;; The Exynos processors have the ARMv8 FP unit, which always flushes preserving sign. -; EXYNOS-FAST: .eabi_attribute 20, 2 -; EXYNOS-FAST-NOT: .eabi_attribute 21 -; EXYNOS-FAST-NOT: .eabi_attribute 22 -; EXYNOS-FAST: .eabi_attribute 23, 1 ; EXYNOS-M3: .cpu exynos-m3 ; EXYNOS-M3: .eabi_attribute 6, 14 @@ -1684,12 +1371,6 @@ ; GENERIC-ARMV8_1-A-NOT: .eabi_attribute 28 ; GENERIC-ARMV8_1-A: .eabi_attribute 38, 1 -; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 19 -;; GENERIC-ARMV8_1-A has the ARMv8 FP unit, which always flushes preserving sign. 
-; GENERIC-ARMV8_1-A-FAST: .eabi_attribute 20, 2 -; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 21 -; GENERIC-ARMV8_1-A-FAST-NOT: .eabi_attribute 22 -; GENERIC-ARMV8_1-A-FAST: .eabi_attribute 23, 1 ; RELOC-PIC: .eabi_attribute 15, 1 ; RELOC-PIC: .eabi_attribute 16, 1 diff --git a/llvm/test/CodeGen/ARM/fnegs.ll b/llvm/test/CodeGen/ARM/fnegs.ll index 435a600822e4d..6055b8f6dd93b 100644 --- a/llvm/test/CodeGen/ARM/fnegs.ll +++ b/llvm/test/CodeGen/ARM/fnegs.ll @@ -10,11 +10,11 @@ ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - \ ; RUN: | FileCheck %s -check-prefix=CORTEXA8 -; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \ +; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --denormal-fp-math=preserve-sign %s -o - \ ; RUN: | FileCheck %s -check-prefix=CORTEXA8U ; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \ -; RUN: | FileCheck %s -check-prefix=CORTEXA8U +; RUN: | FileCheck %s -check-prefix=CORTEXA8U-DARWIN ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - \ ; RUN: | FileCheck %s -check-prefix=CORTEXA9 @@ -41,7 +41,10 @@ entry: ; CORTEXA8: vneg.f32 s{{.*}}, s{{.*}} ; CORTEXA8U-LABEL: test1: -; CORTEXA8U: vneg.f32 d{{.*}}, d{{.*}} +; CORTEXA8U: vsub.f32 d{{.*}}, d{{.*}}, d{{.*}} + +; CORTEXA8U-DARWIN-LABEL: test1: +; CORTEXA8U-DARWIN: vneg.f32 d{{.*}}, d{{.*}} ; CORTEXA9-LABEL: test1: ; CORTEXA9: vneg.f32 s{{.*}}, s{{.*}} @@ -110,9 +113,13 @@ define <2 x float> @fneg_bitcast(i64 %i) { ; CORTEXA8-NOT: vneg.f32 ; CORTEXA8U-LABEL: fneg_bitcast: -; CORTEXA8U-DAG: eor r0, r0, #-2147483648 -; CORTEXA8U-DAG: eor r1, r1, #-2147483648 -; CORTEXA8U-NOT: vneg.f32 +; CORTEXA8U-DAG: vmov.i32 d{{.*}}, #0x80000000 +; CORTEXA8U-DAG: vsub.f32 d{{.*}}, d{{.*}}, d{{.*}} + +; CORTEXA8U-DARWIN-LABEL: fneg_bitcast: +; CORTEXA8U-DARWIN-DAG: eor r0, r0, #-2147483648 +; CORTEXA8U-DARWIN-DAG: eor r1, r1, #-2147483648 +; CORTEXA8U-DARWIN-NOT: vneg.f32 ; CORTEXA9-LABEL: fneg_bitcast: ; CORTEXA9-DAG: eor r0, r0, #-2147483648 diff --git 
a/llvm/test/CodeGen/ARM/fnmscs.ll b/llvm/test/CodeGen/ARM/fnmscs.ll index 0fa878c0c2f49..49f9dcf32f544 100644 --- a/llvm/test/CodeGen/ARM/fnmscs.ll +++ b/llvm/test/CodeGen/ARM/fnmscs.ll @@ -13,11 +13,11 @@ ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 -regalloc=basic %s -o - \ ; RUN: | FileCheck %s -check-prefix=A8 -; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --enable-unsafe-fp-math %s -o - \ +; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 --denormal-fp-math=preserve-sign %s -o - \ ; RUN: | FileCheck %s -check-prefix=A8U ; RUN: llc -mtriple=arm-darwin -mcpu=cortex-a8 %s -o - \ -; RUN: | FileCheck %s -check-prefix=A8U +; RUN: | FileCheck %s -check-prefix=A8U-DARWIN define float @t1(float %acc, float %a, float %b) nounwind { entry: @@ -31,15 +31,20 @@ entry: ; NEON: vnmla.f32 ; A8U-LABEL: t1: -; A8U: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}} -; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}} +; A8U: vmov.i32 d{{[0-9]+}}, #0x80000000 +; A8U: vsub.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}} +; A8U: vsub.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}} + +; A8U-DARWIN-LABEL: t1: +; A8U-DARWIN: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}} +; A8U-DARWIN: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}} ; A8-LABEL: t1: ; A8: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}} ; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}} %0 = fmul float %a, %b %1 = fsub float -0.0, %0 - %2 = fsub float %1, %acc + %2 = fsub float %1, %acc ret float %2 } @@ -55,8 +60,13 @@ entry: ; NEON: vnmla.f32 ; A8U-LABEL: t2: -; A8U: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}} -; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}} +; A8U: vmov.i32 d{{[0-9]+}}, #0x80000000 +; A8U: vsub.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}} +; A8U: vsub.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}} + +; A8U-DARWIN-LABEL: t2: +; A8U-DARWIN: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}} +; A8U-DARWIN: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}} ; A8-LABEL: t2: ; A8: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}} @@ -79,8 
+89,12 @@ entry: ; NEON: vnmla.f64 ; A8U-LABEL: t3: -; A8U: vnmul.f64 d ; A8U: vsub.f64 d +; A8U: vsub.f64 d + +; A8U-DARWIN-LABEL: t3: +; A8U-DARWIN: vnmul.f64 d +; A8U-DARWIN: vsub.f64 d ; A8-LABEL: t3: ; A8: vnmul.f64 d @@ -103,8 +117,12 @@ entry: ; NEON: vnmla.f64 ; A8U-LABEL: t4: -; A8U: vnmul.f64 d ; A8U: vsub.f64 d +; A8U: vsub.f64 d + +; A8U-DARWIN-LABEL: t4: +; A8U-DARWIN: vnmul.f64 d +; A8U-DARWIN: vsub.f64 d ; A8-LABEL: t4: ; A8: vnmul.f64 d diff --git a/llvm/test/CodeGen/ARM/fnmul.ll b/llvm/test/CodeGen/ARM/fnmul.ll index b021de8b7ad00..655c9f8415402 100644 --- a/llvm/test/CodeGen/ARM/fnmul.ll +++ b/llvm/test/CodeGen/ARM/fnmul.ll @@ -1,15 +1,30 @@ -; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 %s -o - | FileCheck %s -check-prefix STRICT - -; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 -enable-unsafe-fp-math %s -o - | FileCheck %s -check-prefix UNSAFE +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=arm-eabi -mattr=+v6,+vfp2 %s -o - | FileCheck %s define double @t1(double %a, double %b) { -; STRICT: vnmul.f64 -; -; UNSAFE: vnmul.f64 +; CHECK-LABEL: t1: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov d0, r2, r3 +; CHECK-NEXT: vmov d1, r0, r1 +; CHECK-NEXT: vnmul.f64 d0, d1, d0 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: bx lr entry: - %tmp2 = fsub double -0.000000e+00, %a ; [#uses=1] - %tmp4 = fmul double %tmp2, %b ; [#uses=1] - ret double %tmp4 + %tmp2 = fsub double -0.000000e+00, %a + %tmp4 = fmul double %tmp2, %b + ret double %tmp4 } - +define double @tfast(double %a, double %b) { +; CHECK-LABEL: tfast: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov d0, r2, r3 +; CHECK-NEXT: vmov d1, r0, r1 +; CHECK-NEXT: vnmul.f64 d0, d1, d0 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: bx lr +entry: + %tmp2 = fsub fast double -0.000000e+00, %a + %tmp4 = fmul fast double %tmp2, %b + ret double %tmp4 +} diff --git a/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll 
b/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll index 33ff71e8c473e..9d0ea0e2d37cf 100644 --- a/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll +++ b/llvm/test/CodeGen/ARM/fp16-vminmaxnm.ll @@ -1,5 +1,6 @@ -; RUN: llc < %s -mtriple=arm-eabi -mattr=+fullfp16 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s -; RUN: llc < %s -mtriple thumbv7a -mattr=+fullfp16 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+fullfp16 -enable-no-nans-fp-math | FileCheck %s +; RUN: llc < %s -mtriple=thumbv7a-none-eabihf -mattr=+fullfp16 -enable-no-nans-fp-math | FileCheck %s ; TODO: we can't pass half-precision arguments as "half" types yet. We do ; that for the time being by passing "float %f.coerce" and the necessary @@ -9,9 +10,11 @@ define half @fp16_vminnm_o(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_o: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -22,9 +25,11 @@ entry: define half @fp16_vminnm_o_rev(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_o_rev: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -35,9 +40,11 @@ entry: define half @fp16_vminnm_u(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_u: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, 
[[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -48,9 +55,11 @@ entry: define half @fp16_vminnm_ule(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_ule: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -61,9 +70,11 @@ entry: define half @fp16_vminnm_u_rev(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_u_rev: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -74,9 +85,11 @@ entry: define half @fp16_vmaxnm_o(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_o: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -87,9 +100,11 @@ entry: define half @fp16_vmaxnm_oge(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_oge: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast 
i16 %a to half %1 = bitcast i16 %b to half @@ -100,9 +115,11 @@ entry: define half @fp16_vmaxnm_o_rev(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_o_rev: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -113,9 +130,11 @@ entry: define half @fp16_vmaxnm_ole_rev(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_ole_rev: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -126,9 +145,11 @@ entry: define half @fp16_vmaxnm_u(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_u: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -139,9 +160,11 @@ entry: define half @fp16_vmaxnm_uge(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_uge: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -152,9 +175,11 @@ entry: define half @fp16_vmaxnm_u_rev(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: 
fp16_vmaxnm_u_rev: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r1 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr entry: %0 = bitcast i16 %a to half %1 = bitcast i16 %b to half @@ -167,11 +192,17 @@ entry: define half @fp16_vminnm_NNNo(i16 signext %a) { ; CHECK-LABEL: fp16_vminnm_NNNo: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01 -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmov.f16 s2, #1.200000e+01 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI12_0 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI12_0: +; CHECK-NEXT: .short 0x5040 @ half 34 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast olt half %0, 12. @@ -183,11 +214,19 @@ entry: define half @fp16_vminnm_NNNo_rev(i16 signext %a) { ; CHECK-LABEL: fp16_vminnm_NNNo_rev: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI13_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI13_1 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI13_0: +; CHECK-NEXT: .short 0x5300 @ half 56 +; CHECK-NEXT: .LCPI13_1: +; CHECK-NEXT: .short 0x54e0 @ half 78 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast ogt half %0, 56. 
@@ -199,11 +238,17 @@ entry: define half @fp16_vminnm_NNNu(i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_NNNu: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01 -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmov.f16 s2, #1.200000e+01 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI14_0 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI14_0: +; CHECK-NEXT: .short 0x5040 @ half 34 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast ult half 12., %0 @@ -215,11 +260,19 @@ entry: define half @fp16_vminnm_NNNule(i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_NNNule: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI15_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI15_1 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI15_0: +; CHECK-NEXT: .short 0x5040 @ half 34 +; CHECK-NEXT: .LCPI15_1: +; CHECK-NEXT: .short 0x5300 @ half 56 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast ule half 34., %0 @@ -231,11 +284,19 @@ entry: define half @fp16_vminnm_NNNu_rev(i16 signext %b) { ; CHECK-LABEL: fp16_vminnm_NNNu_rev: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vminnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI16_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI16_1 +; 
CHECK-NEXT: vminnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI16_0: +; CHECK-NEXT: .short 0x5300 @ half 56 +; CHECK-NEXT: .LCPI16_1: +; CHECK-NEXT: .short 0x54e0 @ half 78 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast ugt half 56., %0 @@ -247,11 +308,17 @@ entry: define half @fp16_vmaxnm_NNNo(i16 signext %a) { ; CHECK-LABEL: fp16_vmaxnm_NNNo: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01 -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmov.f16 s2, #1.200000e+01 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI17_0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI17_0: +; CHECK-NEXT: .short 0x5040 @ half 34 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast ogt half %0, 12. @@ -263,11 +330,19 @@ entry: define half @fp16_vmaxnm_NNNoge(i16 signext %a) { ; CHECK-LABEL: fp16_vmaxnm_NNNoge: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI18_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI18_1 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI18_0: +; CHECK-NEXT: .short 0x5040 @ half 34 +; CHECK-NEXT: .LCPI18_1: +; CHECK-NEXT: .short 0x5300 @ half 56 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast oge half %0, 34. 
@@ -279,11 +354,19 @@ entry: define half @fp16_vmaxnm_NNNo_rev(i16 signext %a) { ; CHECK-LABEL: fp16_vmaxnm_NNNo_rev: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI19_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI19_1 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI19_0: +; CHECK-NEXT: .short 0x5300 @ half 56 +; CHECK-NEXT: .LCPI19_1: +; CHECK-NEXT: .short 0x54e0 @ half 78 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast olt half %0, 56. @@ -295,11 +378,19 @@ entry: define half @fp16_vmaxnm_NNNole_rev(i16 signext %a) { ; CHECK-LABEL: fp16_vmaxnm_NNNole_rev: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI20_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI20_1 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI20_0: +; CHECK-NEXT: .short 0x54e0 @ half 78 +; CHECK-NEXT: .LCPI20_1: +; CHECK-NEXT: .short 0x55a0 @ half 90 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast ole half %0, 78. 
@@ -311,11 +402,17 @@ entry: define half @fp16_vmaxnm_NNNu(i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_NNNu: -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01 -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmov.f16 s2, #1.200000e+01 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI21_0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI21_0: +; CHECK-NEXT: .short 0x5040 @ half 34 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast ugt half 12., %0 @@ -327,11 +424,19 @@ entry: define half @fp16_vmaxnm_NNNuge(i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_NNNuge: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI22_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI22_1 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI22_0: +; CHECK-NEXT: .short 0x5040 @ half 34 +; CHECK-NEXT: .LCPI22_1: +; CHECK-NEXT: .short 0x5300 @ half 56 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast uge half 34., %0 @@ -343,11 +448,19 @@ entry: define half @fp16_vmaxnm_NNNu_rev(i16 signext %b) { ; CHECK-LABEL: fp16_vmaxnm_NNNu_rev: -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] -; CHECK: vldr.16 s2, .LCPI{{.*}} -; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s2, .LCPI23_0 +; CHECK-NEXT: vmov.f16 s0, r0 +; CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: vldr.16 s2, .LCPI23_1 +; 
CHECK-NEXT: vmaxnm.f16 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI23_0: +; CHECK-NEXT: .short 0x5300 @ half 56 +; CHECK-NEXT: .LCPI23_1: +; CHECK-NEXT: .short 0x54e0 @ half 78 entry: %0 = bitcast i16 %b to half %cmp1 = fcmp fast ult half 56., %0 @@ -359,10 +472,16 @@ entry: define half @fp16_vminmaxnm_0(i16 signext %a) { ; CHECK-LABEL: fp16_vminmaxnm_0: -; CHECK: vldr.16 s0, .LCPI{{.*}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s2, s2, s0 -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s0, .LCPI24_0 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s2, s2, s0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI24_0: +; CHECK-NEXT: .short 0x0000 @ half 0 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast olt half %0, 0. @@ -374,10 +493,16 @@ entry: define half @fp16_vminmaxnm_neg0(i16 signext %a) { ; CHECK-LABEL: fp16_vminmaxnm_neg0: -; CHECK: vldr.16 s0, .LCPI{{.*}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s2, s2, s0 -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s0, .LCPI25_0 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s2, s2, s0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI25_0: +; CHECK-NEXT: .short 0x8000 @ half -0 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast olt half %0, -0. 
@@ -389,10 +514,16 @@ entry: define half @fp16_vminmaxnm_e_0(i16 signext %a) { ; CHECK-LABEL: fp16_vminmaxnm_e_0: -; CHECK: vldr.16 s0, .LCPI{{.*}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s2, s2, s0 -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s0, .LCPI26_0 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s2, s2, s0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI26_0: +; CHECK-NEXT: .short 0x0000 @ half 0 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast ule half 0., %0 @@ -404,10 +535,16 @@ entry: define half @fp16_vminmaxnm_e_neg0(i16 signext %a) { ; CHECK-LABEL: fp16_vminmaxnm_e_neg0: -; CHECK: vldr.16 s0, .LCPI{{.*}} -; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}} -; CHECK: vminnm.f16 s2, s2, s0 -; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]] +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldr.16 s0, .LCPI27_0 +; CHECK-NEXT: vmov.f16 s2, r0 +; CHECK-NEXT: vminnm.f16 s2, s2, s0 +; CHECK-NEXT: vmaxnm.f16 s0, s2, s0 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 1 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI27_0: +; CHECK-NEXT: .short 0x8000 @ half -0 entry: %0 = bitcast i16 %a to half %cmp1 = fcmp fast ule half -0., %0 diff --git a/llvm/test/CodeGen/ARM/fp16.ll b/llvm/test/CodeGen/ARM/fp16.ll deleted file mode 100644 index 9ff701050ac7e..0000000000000 --- a/llvm/test/CodeGen/ARM/fp16.ll +++ /dev/null @@ -1,105 +0,0 @@ -; RUN: llc -mtriple=armv7a--none-eabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-HARDFLOAT-EABI %s -; RUN: llc -mtriple=armv7a--none-gnueabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-HARDFLOAT-GNU %s -; RUN: llc -mtriple=armv7a--none-musleabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-HARDFLOAT-GNU %s -; RUN: llc -mtriple=armv8-eabihf < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-ARMV8 %s -; RUN: llc -mtriple=thumbv7m-eabi < %s | FileCheck 
--check-prefix=CHECK --check-prefix=CHECK-SOFTFLOAT-EABI %s -; RUN: llc -mtriple=thumbv7m-gnueabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-SOFTFLOAT-GNU %s -; RUN: llc -mtriple=thumbv7m-musleabi < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-SOFTFLOAT-GNU %s - -;; +fp16 is special: it has f32->f16 (unlike v7), but not f64->f16 (unlike v8). -;; This exposes unsafe-fp-math optimization opportunities; test that. -; RUN: llc -mattr=+vfp3,+fp16 < %s |\ -; RUN: FileCheck --check-prefix=CHECK --check-prefix=CHECK-FP16 --check-prefix=CHECK-FP16-SAFE %s -; RUN: llc -mattr=+vfp3,+fp16 < %s -enable-unsafe-fp-math |\ -; RUN: FileCheck --check-prefix=CHECK --check-prefix=CHECK-FP16 --check-prefix=CHECK-FP16-UNSAFE %s - -target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32" -target triple = "armv7---eabihf" - -@x = global i16 12902 -@y = global i16 0 -@z = common global i16 0 - -define void @foo() nounwind { -; CHECK-LABEL: foo: -entry: - %0 = load i16, ptr @x, align 2 - %1 = load i16, ptr @y, align 2 - %2 = tail call float @llvm.convert.from.fp16.f32(i16 %0) -; CHECK-HARDFLOAT-EABI: __aeabi_h2f -; CHECK-HARDFLOAT-GNU: __gnu_h2f_ieee -; CHECK-FP16: vcvtb.f32.f16 -; CHECK-ARMV8: vcvtb.f32.f16 -; CHECK-SOFTFLOAT-EABI: __aeabi_h2f -; CHECK-SOFTFLOAT-GNU: __gnu_h2f_ieee - %3 = tail call float @llvm.convert.from.fp16.f32(i16 %1) -; CHECK-HARDFLOAT-EABI: __aeabi_h2f -; CHECK-HARDFLOAT-GNU: __gnu_h2f_ieee -; CHECK-FP16: vcvtb.f32.f16 -; CHECK-ARMV8: vcvtb.f32.f16 -; CHECK-SOFTFLOAT-EABI: __aeabi_h2f -; CHECK-SOFTFLOAT-GNU: __gnu_h2f_ieee - %4 = fadd float %2, %3 - %5 = tail call i16 @llvm.convert.to.fp16.f32(float %4) -; CHECK-HARDFLOAT-EABI: __aeabi_f2h -; CHECK-HARDFLOAT-GNU: __gnu_f2h_ieee -; CHECK-FP16: vcvtb.f16.f32 -; CHECK-ARMV8: vcvtb.f16.f32 -; CHECK-SOFTFLOAT-EABI: __aeabi_f2h -; CHECK-SOFTFLOAT-GNU: __gnu_f2h_ieee - store i16 %5, ptr @x, align 2 - ret void -} - 
-define double @test_from_fp16(i16 %in) { -; CHECK-LABEL: test_from_fp16: - %val = call double @llvm.convert.from.fp16.f64(i16 %in) -; CHECK-HARDFLOAT-EABI: bl __aeabi_h2f -; CHECK-HARDFLOAT-EABI: vmov [[TMP:s[0-9]+]], r0 -; CHECK-HARDFLOAT-EABI: vcvt.f64.f32 {{d[0-9]+}}, [[TMP]] - -; CHECK-HARDFLOAT-GNU: bl __gnu_h2f_ieee -; CHECK-HARDFLOAT-GNU: vmov [[TMP:s[0-9]+]], r0 -; CHECK-HARDFLOAT-GNU: vcvt.f64.f32 {{d[0-9]+}}, [[TMP]] - -; CHECK-FP16: vmov [[TMP16:s[0-9]+]], r0 -; CHECK-FP16: vcvtb.f32.f16 [[TMP32:s[0-9]+]], [[TMP16]] -; CHECK-FP16: vcvt.f64.f32 d0, [[TMP32]] - -; CHECK-ARMV8: vmov [[TMP:s[0-9]+]], r0 -; CHECK-ARMV8: vcvtb.f64.f16 d0, [[TMP]] - -; CHECK-SOFTFLOAT-EABI: bl __aeabi_h2f -; CHECK-SOFTFLOAT-EABI: bl __aeabi_f2d - -; CHECK-SOFTFLOAT-GNU: bl __gnu_h2f_ieee -; CHECK-SOFTFLOAT-GNU: bl __aeabi_f2d - ret double %val -} - -define i16 @test_to_fp16(double %in) { -; CHECK-LABEL: test_to_fp16: - %val = call i16 @llvm.convert.to.fp16.f64(double %in) -; CHECK-HARDFLOAT-EABI: bl __aeabi_d2h - -; CHECK-HARDFLOAT-GNU: bl __aeabi_d2h - -; CHECK-FP16-SAFE: bl __aeabi_d2h - -; CHECK-FP16-UNSAFE: vmov r0, r1, d0 -; CHECK-FP16-UNSAFE-NEXT: bl __aeabi_d2h - -; CHECK-ARMV8: vcvtb.f16.f64 [[TMP:s[0-9]+]], d0 -; CHECK-ARMV8: vmov r0, [[TMP]] - -; CHECK-SOFTFLOAT-EABI: bl __aeabi_d2h - -; CHECK-SOFTFLOAT-GNU: bl __aeabi_d2h - ret i16 %val -} - -declare float @llvm.convert.from.fp16.f32(i16) nounwind readnone -declare double @llvm.convert.from.fp16.f64(i16) nounwind readnone - -declare i16 @llvm.convert.to.fp16.f32(float) nounwind readnone -declare i16 @llvm.convert.to.fp16.f64(double) nounwind readnone diff --git a/llvm/test/CodeGen/ARM/sincos.ll b/llvm/test/CodeGen/ARM/sincos.ll index dc8fdf69ca610..e1b683a8a6657 100644 --- a/llvm/test/CodeGen/ARM/sincos.ll +++ b/llvm/test/CodeGen/ARM/sincos.ll @@ -1,8 +1,7 @@ ; RUN: llc < %s -mtriple=armv7-apple-ios6 -mcpu=cortex-a8 | FileCheck %s --check-prefix=NOOPT ; RUN: llc < %s -mtriple=armv7-apple-ios7 -mcpu=cortex-a8 | 
FileCheck %s --check-prefix=SINCOS ; RUN: llc < %s -mtriple=armv7-linux-gnu -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 \ -; RUN: --enable-unsafe-fp-math | FileCheck %s --check-prefix=SINCOS-GNU +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU ; RUN: llc < %s -mtriple=armv7-linux-android -mcpu=cortex-a8 | FileCheck %s --check-prefix=NOOPT-ANDROID ; RUN: llc < %s -mtriple=armv7-linux-android9 -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU @@ -33,6 +32,28 @@ entry: ret float %add } +define float @test1_fast(float %x) nounwind { +entry: +; SINCOS-LABEL: test1_fast: +; SINCOS: bl ___sincosf_stret + +; SINCOS-GNU-LABEL: test1_fast: +; SINCOS-GNU: bl sincosf + +; NOOPT-LABEL: test1_fast: +; NOOPT: bl _sinf +; NOOPT: bl _cosf + +; NOOPT-ANDROID-LABEL: test1_fast: +; NOOPT-ANDROID: bl sinf +; NOOPT-ANDROID: bl cosf + + %call = tail call fast float @sinf(float %x) readnone + %call1 = tail call fast float @cosf(float %x) readnone + %add = fadd float %call, %call1 + ret float %add +} + define float @test1_errno(float %x) nounwind { entry: ; SINCOS-LABEL: test1_errno: @@ -79,6 +100,28 @@ entry: ret double %add } +define double @test2_fast(double %x) nounwind { +entry: +; SINCOS-LABEL: test2_fast: +; SINCOS: bl ___sincos_stret + +; SINCOS-GNU-LABEL: test2_fast: +; SINCOS-GNU: bl sincos + +; NOOPT-LABEL: test2_fast: +; NOOPT: bl _sin +; NOOPT: bl _cos + +; NOOPT-ANDROID-LABEL: test2_fast: +; NOOPT-ANDROID: bl sin +; NOOPT-ANDROID: bl cos + + %call = tail call fast double @sin(double %x) readnone + %call1 = tail call fast double @cos(double %x) readnone + %add = fadd double %call, %call1 + ret double %add +} + define double @test2_errno(double %x) nounwind { entry: ; SINCOS-LABEL: test2_errno: diff --git a/llvm/test/CodeGen/ARM/vminmaxnm.ll b/llvm/test/CodeGen/ARM/vminmaxnm.ll index bb3ea3067541e..be33dbfc61b04 100644 --- 
a/llvm/test/CodeGen/ARM/vminmaxnm.ll +++ b/llvm/test/CodeGen/ARM/vminmaxnm.ll @@ -1,146 +1,163 @@ -; RUN: llc < %s -mtriple armv8 -mattr=+neon,+fp-armv8 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple armv8-none-eabihf -mattr=+neon,+fp-armv8 -enable-no-nans-fp-math | FileCheck %s ; scalars -define float @fp-armv8_vminnm_o(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_o": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_o(float %a, float %b) { +; CHECK-LABEL: fparmv8_vminnm_o: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast olt float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define double @fp-armv8_vminnm_ole(double %a, double %b) { -; CHECK-LABEL: "fp-armv8_vminnm_ole": -; CHECK-NOT: vcmp -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_ole(double %a, double %b) { +; CHECK-LABEL: fparmv8_vminnm_ole: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f64 d0, d0, d1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ole double %a, %b %cond = select nsz i1 %cmp, double %a, double %b ret double %cond } -define float @fp-armv8_vminnm_o_rev(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_o_rev": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_o_rev(float %a, float %b) { +; CHECK-LABEL: fparmv8_vminnm_o_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ogt float %a, %b %cond = select nsz i1 %cmp, float %b, float %a ret float %cond } -define double @fp-armv8_vminnm_oge_rev(double %a, double %b) { -; CHECK-LABEL: "fp-armv8_vminnm_oge_rev": -; CHECK-NOT: vcmp -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_oge_rev(double %a, double %b) { +; CHECK-LABEL: fparmv8_vminnm_oge_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f64 d0, d0, d1 +; CHECK-NEXT: bx lr %cmp = fcmp fast 
oge double %a, %b %cond = select nsz i1 %cmp, double %b, double %a ret double %cond } -define float @fp-armv8_vminnm_u(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_u": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_u(float %a, float %b) { +; CHECK-LABEL: fparmv8_vminnm_u: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ult float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vminnm_ule(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_ule": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_ule(float %a, float %b) { +; CHECK-LABEL: fparmv8_vminnm_ule: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ule float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vminnm_u_rev(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_u_rev": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_u_rev(float %a, float %b) { +; CHECK-LABEL: fparmv8_vminnm_u_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ugt float %a, %b %cond = select nsz i1 %cmp, float %b, float %a ret float %cond } -define double @fp-armv8_vminnm_uge_rev(double %a, double %b) { -; CHECK-LABEL: "fp-armv8_vminnm_uge_rev": -; CHECK-NOT: vcmp -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_uge_rev(double %a, double %b) { +; CHECK-LABEL: fparmv8_vminnm_uge_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vminnm.f64 d0, d0, d1 +; CHECK-NEXT: bx lr %cmp = fcmp fast uge double %a, %b %cond = select nsz i1 %cmp, double %b, double %a ret double %cond } -define float @fp-armv8_vmaxnm_o(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_o": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_o(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_o: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, 
s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ogt float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vmaxnm_oge(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_oge": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_oge(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_oge: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast oge float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vmaxnm_o_rev(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_o_rev": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_o_rev(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_o_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast olt float %a, %b %cond = select nsz i1 %cmp, float %b, float %a ret float %cond } -define float @fp-armv8_vmaxnm_ole_rev(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_ole_rev": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_ole_rev(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_ole_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ole float %a, %b %cond = select nsz i1 %cmp, float %b, float %a ret float %cond } -define float @fp-armv8_vmaxnm_u(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_u": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_u(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_u: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ugt float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vmaxnm_uge(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_uge": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_uge(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_uge: +; CHECK: @ 
%bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast uge float %a, %b %cond = select nsz i1 %cmp, float %a, float %b ret float %cond } -define float @fp-armv8_vmaxnm_u_rev(float %a, float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_u_rev": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_u_rev(float %a, float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_u_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f32 s0, s0, s1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ult float %a, %b %cond = select nsz i1 %cmp, float %b, float %a ret float %cond } -define double @fp-armv8_vmaxnm_ule_rev(double %a, double %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_ule_rev": -; CHECK-NOT: vcmp -; CHECK: vmaxnm.f64 +define double @fparmv8_vmaxnm_ule_rev(double %a, double %b) { +; CHECK-LABEL: fparmv8_vmaxnm_ule_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmaxnm.f64 d0, d0, d1 +; CHECK-NEXT: bx lr %cmp = fcmp fast ule double %a, %b %cond = select nsz i1 %cmp, double %b, double %a ret double %cond @@ -148,10 +165,18 @@ define double @fp-armv8_vmaxnm_ule_rev(double %a, double %b) { ; known non-NaNs -define float @fp-armv8_vminnm_NNNo(float %a) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNo": -; CHECK: vminnm.f32 -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_NNNo(float %a) { +; CHECK-LABEL: fparmv8_vminnm_NNNo: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov.f32 s2, #1.200000e+01 +; CHECK-NEXT: vldr s4, .LCPI16_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vminnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI16_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 %cmp1 = fcmp fast olt float %a, 12. %cond1 = select nsz i1 %cmp1, float %a, float 12. 
%cmp2 = fcmp fast olt float 34., %cond1 @@ -159,10 +184,22 @@ define float @fp-armv8_vminnm_NNNo(float %a) { ret float %cond2 } -define double @fp-armv8_vminnm_NNNole(double %a) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNole": -; CHECK: vminnm.f64 -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_NNNole(double %a) { +; CHECK-LABEL: fparmv8_vminnm_NNNole: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr d16, .LCPI17_0 +; CHECK-NEXT: vldr d17, .LCPI17_1 +; CHECK-NEXT: vminnm.f64 d16, d0, d16 +; CHECK-NEXT: vminnm.f64 d0, d16, d17 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI17_0: +; CHECK-NEXT: .long 0 @ double 34 +; CHECK-NEXT: .long 1078001664 +; CHECK-NEXT: .LCPI17_1: +; CHECK-NEXT: .long 0 @ double 56 +; CHECK-NEXT: .long 1078722560 %cmp1 = fcmp fast ole double %a, 34. %cond1 = select nsz i1 %cmp1, double %a, double 34. %cmp2 = fcmp fast ole double 56., %cond1 @@ -170,10 +207,20 @@ define double @fp-armv8_vminnm_NNNole(double %a) { ret double %cond2 } -define float @fp-armv8_vminnm_NNNo_rev(float %a) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNo_rev": -; CHECK: vminnm.f32 -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_NNNo_rev(float %a) { +; CHECK-LABEL: fparmv8_vminnm_NNNo_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI18_0 +; CHECK-NEXT: vldr s4, .LCPI18_1 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vminnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI18_0: +; CHECK-NEXT: .long 0x42600000 @ float 56 +; CHECK-NEXT: .LCPI18_1: +; CHECK-NEXT: .long 0x429c0000 @ float 78 %cmp1 = fcmp fast ogt float %a, 56. 
%cond1 = select nsz i1 %cmp1, float 56., float %a %cmp2 = fcmp fast ogt float 78., %cond1 @@ -181,10 +228,22 @@ define float @fp-armv8_vminnm_NNNo_rev(float %a) { ret float %cond2 } -define double @fp-armv8_vminnm_NNNoge_rev(double %a) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNoge_rev": -; CHECK: vminnm.f64 -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_NNNoge_rev(double %a) { +; CHECK-LABEL: fparmv8_vminnm_NNNoge_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr d16, .LCPI19_0 +; CHECK-NEXT: vldr d17, .LCPI19_1 +; CHECK-NEXT: vminnm.f64 d16, d0, d16 +; CHECK-NEXT: vminnm.f64 d0, d16, d17 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI19_0: +; CHECK-NEXT: .long 0 @ double 78 +; CHECK-NEXT: .long 1079214080 +; CHECK-NEXT: .LCPI19_1: +; CHECK-NEXT: .long 0 @ double 90 +; CHECK-NEXT: .long 1079410688 %cmp1 = fcmp fast oge double %a, 78. %cond1 = select nsz i1 %cmp1, double 78., double %a %cmp2 = fcmp fast oge double 90., %cond1 @@ -192,10 +251,18 @@ define double @fp-armv8_vminnm_NNNoge_rev(double %a) { ret double %cond2 } -define float @fp-armv8_vminnm_NNNu(float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNu": -; CHECK: vminnm.f32 -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_NNNu(float %b) { +; CHECK-LABEL: fparmv8_vminnm_NNNu: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov.f32 s2, #1.200000e+01 +; CHECK-NEXT: vldr s4, .LCPI20_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vminnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI20_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 %cmp1 = fcmp fast ult float 12., %b %cond1 = select nsz i1 %cmp1, float 12., float %b %cmp2 = fcmp fast ult float %cond1, 34. 
@@ -203,10 +270,20 @@ define float @fp-armv8_vminnm_NNNu(float %b) { ret float %cond2 } -define float @fp-armv8_vminnm_NNNule(float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNule": -; CHECK: vminnm.f32 -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_NNNule(float %b) { +; CHECK-LABEL: fparmv8_vminnm_NNNule: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI21_0 +; CHECK-NEXT: vldr s4, .LCPI21_1 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vminnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI21_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 +; CHECK-NEXT: .LCPI21_1: +; CHECK-NEXT: .long 0x42600000 @ float 56 %cmp1 = fcmp fast ule float 34., %b %cond1 = select nsz i1 %cmp1, float 34., float %b %cmp2 = fcmp fast ule float %cond1, 56. @@ -214,10 +291,20 @@ define float @fp-armv8_vminnm_NNNule(float %b) { ret float %cond2 } -define float @fp-armv8_vminnm_NNNu_rev(float %b) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNu_rev": -; CHECK: vminnm.f32 -; CHECK: vminnm.f32 +define float @fparmv8_vminnm_NNNu_rev(float %b) { +; CHECK-LABEL: fparmv8_vminnm_NNNu_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI22_0 +; CHECK-NEXT: vldr s4, .LCPI22_1 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vminnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI22_0: +; CHECK-NEXT: .long 0x42600000 @ float 56 +; CHECK-NEXT: .LCPI22_1: +; CHECK-NEXT: .long 0x429c0000 @ float 78 %cmp1 = fcmp fast ugt float 56., %b %cond1 = select nsz i1 %cmp1, float %b, float 56. %cmp2 = fcmp fast ugt float %cond1, 78. 
@@ -225,10 +312,22 @@ define float @fp-armv8_vminnm_NNNu_rev(float %b) { ret float %cond2 } -define double @fp-armv8_vminnm_NNNuge_rev(double %b) { -; CHECK-LABEL: "fp-armv8_vminnm_NNNuge_rev": -; CHECK: vminnm.f64 -; CHECK: vminnm.f64 +define double @fparmv8_vminnm_NNNuge_rev(double %b) { +; CHECK-LABEL: fparmv8_vminnm_NNNuge_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr d16, .LCPI23_0 +; CHECK-NEXT: vldr d17, .LCPI23_1 +; CHECK-NEXT: vminnm.f64 d16, d0, d16 +; CHECK-NEXT: vminnm.f64 d0, d16, d17 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI23_0: +; CHECK-NEXT: .long 0 @ double 78 +; CHECK-NEXT: .long 1079214080 +; CHECK-NEXT: .LCPI23_1: +; CHECK-NEXT: .long 0 @ double 90 +; CHECK-NEXT: .long 1079410688 %cmp1 = fcmp fast uge double 78., %b %cond1 = select nsz i1 %cmp1, double %b, double 78. %cmp2 = fcmp fast uge double %cond1, 90. @@ -236,10 +335,18 @@ define double @fp-armv8_vminnm_NNNuge_rev(double %b) { ret double %cond2 } -define float @fp-armv8_vmaxnm_NNNo(float %a) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNo": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNo(float %a) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNo: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov.f32 s2, #1.200000e+01 +; CHECK-NEXT: vldr s4, .LCPI24_0 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI24_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 %cmp1 = fcmp fast ogt float %a, 12. %cond1 = select nsz i1 %cmp1, float %a, float 12. 
%cmp2 = fcmp fast ogt float 34., %cond1 @@ -247,10 +354,20 @@ define float @fp-armv8_vmaxnm_NNNo(float %a) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNoge(float %a) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNoge": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNoge(float %a) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNoge: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI25_0 +; CHECK-NEXT: vldr s4, .LCPI25_1 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI25_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 +; CHECK-NEXT: .LCPI25_1: +; CHECK-NEXT: .long 0x42600000 @ float 56 %cmp1 = fcmp fast oge float %a, 34. %cond1 = select nsz i1 %cmp1, float %a, float 34. %cmp2 = fcmp fast oge float 56., %cond1 @@ -258,10 +375,20 @@ define float @fp-armv8_vmaxnm_NNNoge(float %a) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNo_rev(float %a) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNo_rev": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNo_rev(float %a) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNo_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI26_0 +; CHECK-NEXT: vldr s4, .LCPI26_1 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI26_0: +; CHECK-NEXT: .long 0x42600000 @ float 56 +; CHECK-NEXT: .LCPI26_1: +; CHECK-NEXT: .long 0x429c0000 @ float 78 %cmp1 = fcmp fast olt float %a, 56. 
%cond1 = select nsz i1 %cmp1, float 56., float %a %cmp2 = fcmp fast olt float 78., %cond1 @@ -269,10 +396,20 @@ define float @fp-armv8_vmaxnm_NNNo_rev(float %a) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNole_rev(float %a) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNole_rev": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNole_rev(float %a) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNole_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI27_0 +; CHECK-NEXT: vldr s4, .LCPI27_1 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI27_0: +; CHECK-NEXT: .long 0x429c0000 @ float 78 +; CHECK-NEXT: .LCPI27_1: +; CHECK-NEXT: .long 0x42b40000 @ float 90 %cmp1 = fcmp fast ole float %a, 78. %cond1 = select nsz i1 %cmp1, float 78., float %a %cmp2 = fcmp fast ole float 90., %cond1 @@ -280,10 +417,18 @@ define float @fp-armv8_vmaxnm_NNNole_rev(float %a) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNu(float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNu": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNu(float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNu: +; CHECK: @ %bb.0: +; CHECK-NEXT: vmov.f32 s2, #1.200000e+01 +; CHECK-NEXT: vldr s4, .LCPI28_0 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI28_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 %cmp1 = fcmp fast ugt float 12., %b %cond1 = select nsz i1 %cmp1, float 12., float %b %cmp2 = fcmp fast ugt float %cond1, 34. 
@@ -291,10 +436,20 @@ define float @fp-armv8_vmaxnm_NNNu(float %b) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNuge(float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNuge": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNuge(float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNuge: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI29_0 +; CHECK-NEXT: vldr s4, .LCPI29_1 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI29_0: +; CHECK-NEXT: .long 0x42080000 @ float 34 +; CHECK-NEXT: .LCPI29_1: +; CHECK-NEXT: .long 0x42600000 @ float 56 %cmp1 = fcmp fast uge float 34., %b %cond1 = select nsz i1 %cmp1, float 34., float %b %cmp2 = fcmp fast uge float %cond1, 56. @@ -302,10 +457,20 @@ define float @fp-armv8_vmaxnm_NNNuge(float %b) { ret float %cond2 } -define float @fp-armv8_vmaxnm_NNNu_rev(float %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNu_rev": -; CHECK: vmaxnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vmaxnm_NNNu_rev(float %b) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNu_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI30_0 +; CHECK-NEXT: vldr s4, .LCPI30_1 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s4 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI30_0: +; CHECK-NEXT: .long 0x42600000 @ float 56 +; CHECK-NEXT: .LCPI30_1: +; CHECK-NEXT: .long 0x429c0000 @ float 78 %cmp1 = fcmp fast ult float 56., %b %cond1 = select nsz i1 %cmp1, float %b, float 56. %cmp2 = fcmp fast ult float %cond1, 78. 
@@ -313,10 +478,22 @@ define float @fp-armv8_vmaxnm_NNNu_rev(float %b) { ret float %cond2 } -define double @fp-armv8_vmaxnm_NNNule_rev( double %b) { -; CHECK-LABEL: "fp-armv8_vmaxnm_NNNule_rev": -; CHECK: vmaxnm.f64 -; CHECK: vmaxnm.f64 +define double @fparmv8_vmaxnm_NNNule_rev( double %b) { +; CHECK-LABEL: fparmv8_vmaxnm_NNNule_rev: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr d16, .LCPI31_0 +; CHECK-NEXT: vldr d17, .LCPI31_1 +; CHECK-NEXT: vmaxnm.f64 d16, d0, d16 +; CHECK-NEXT: vmaxnm.f64 d0, d16, d17 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 3 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI31_0: +; CHECK-NEXT: .long 0 @ double 78 +; CHECK-NEXT: .long 1079214080 +; CHECK-NEXT: .LCPI31_1: +; CHECK-NEXT: .long 0 @ double 90 +; CHECK-NEXT: .long 1079410688 %cmp1 = fcmp fast ule double 78., %b %cond1 = select nsz i1 %cmp1, double %b, double 78. %cmp2 = fcmp fast ule double %cond1, 90. @@ -324,11 +501,17 @@ define double @fp-armv8_vmaxnm_NNNule_rev( double %b) { ret double %cond2 } -define float @fp-armv8_vminmaxnm_0(float %a) { -; CHECK-LABEL: "fp-armv8_vminmaxnm_0": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vminmaxnm_0(float %a) { +; CHECK-LABEL: fparmv8_vminmaxnm_0: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI32_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI32_0: +; CHECK-NEXT: .long 0x00000000 @ float 0 %cmp1 = fcmp fast olt float %a, 0. %cond1 = select nsz i1 %cmp1, float %a, float 0. %cmp2 = fcmp fast ogt float %cond1, 0. 
@@ -336,11 +519,17 @@ define float @fp-armv8_vminmaxnm_0(float %a) { ret float %cond2 } -define float @fp-armv8_vminmaxnm_neg0(float %a) { -; CHECK-LABEL: "fp-armv8_vminmaxnm_neg0": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vminmaxnm_neg0(float %a) { +; CHECK-LABEL: fparmv8_vminmaxnm_neg0: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI33_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI33_0: +; CHECK-NEXT: .long 0x80000000 @ float -0 %cmp1 = fcmp fast olt float %a, -0. %cond1 = select nsz i1 %cmp1, float %a, float -0. %cmp2 = fcmp fast ugt float %cond1, -0. @@ -348,11 +537,17 @@ define float @fp-armv8_vminmaxnm_neg0(float %a) { ret float %cond2 } -define float @fp-armv8_vminmaxnm_e_0(float %a) { -; CHECK-LABEL: "fp-armv8_vminmaxnm_e_0": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vminmaxnm_e_0(float %a) { +; CHECK-LABEL: fparmv8_vminmaxnm_e_0: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI34_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI34_0: +; CHECK-NEXT: .long 0x00000000 @ float 0 %cmp1 = fcmp fast ule float 0., %a %cond1 = select nsz i1 %cmp1, float 0., float %a %cmp2 = fcmp fast uge float 0., %cond1 @@ -360,11 +555,17 @@ define float @fp-armv8_vminmaxnm_e_0(float %a) { ret float %cond2 } -define float @fp-armv8_vminmaxnm_e_neg0(float %a) { -; CHECK-LABEL: "fp-armv8_vminmaxnm_e_neg0": -; CHECK-NOT: vcmp -; CHECK: vminnm.f32 -; CHECK: vmaxnm.f32 +define float @fparmv8_vminmaxnm_e_neg0(float %a) { +; CHECK-LABEL: fparmv8_vminmaxnm_e_neg0: +; CHECK: @ %bb.0: +; CHECK-NEXT: vldr s2, .LCPI35_0 +; CHECK-NEXT: vminnm.f32 s0, s0, s2 +; CHECK-NEXT: vmaxnm.f32 s0, s0, s2 +; CHECK-NEXT: bx lr +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: @ %bb.1: +; 
CHECK-NEXT: .LCPI35_0: +; CHECK-NEXT: .long 0x80000000 @ float -0 %cmp1 = fcmp fast ule float -0., %a %cond1 = select nsz i1 %cmp1, float -0., float %a %cmp2 = fcmp fast oge float -0., %cond1 diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinations.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinations.ll index d6cb05b5d0dd9..850b9a7f36ff3 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinations.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinations.ll @@ -11,8 +11,9 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 2 } ; function, root signature -!3 = !{ !5 } ; list of root signature elements -!5 = !{ !"DescriptorTable", i32 0, !6, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20 } +!3 = !{ !5, !21 } ; list of root signature elements +!5 = !{ !"DescriptorTable", i32 0, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20 } +!21 = !{ !"DescriptorTable", i32 0, !6, !8, !9 } ; typedef enum D3D12_DESCRIPTOR_RANGE_FLAGS { ; NONE = 0, @@ -53,37 +54,20 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !20 = !{ !"UAV", i32 5, i32 1, i32 15, i32 5, i32 65540 } ;DXC:- Name: RTS0 -;DXC-NEXT: Size: 380 +;DXC-NEXT: Size: 400 ;DXC-NEXT: RootSignature: ;DXC-NEXT: Version: 2 -;DXC-NEXT: NumRootParameters: 1 +;DXC-NEXT: NumRootParameters: 2 ;DXC-NEXT: RootParametersOffset: 24 ;DXC-NEXT: NumStaticSamplers: 0 -;DXC-NEXT: StaticSamplersOffset: 380 +;DXC-NEXT: StaticSamplersOffset: 400 ;DXC-NEXT: Parameters: ;DXC-NEXT: - ParameterType: DescriptorTable ;DXC-NEXT: ShaderVisibility: All ;DXC-NEXT: Table: -;DXC-NEXT: NumRanges: 14 -;DXC-NEXT: RangesOffset: 44 +;DXC-NEXT: NumRanges: 11 +;DXC-NEXT: RangesOffset: 56 ;DXC-NEXT: Ranges: -;DXC-NEXT: - 
RangeType: Sampler -;DXC-NEXT: NumDescriptors: 1 -;DXC-NEXT: BaseShaderRegister: 0 -;DXC-NEXT: RegisterSpace: 1 -;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 -;DXC-NEXT: - RangeType: Sampler -;DXC-NEXT: NumDescriptors: 1 -;DXC-NEXT: BaseShaderRegister: 0 -;DXC-NEXT: RegisterSpace: 3 -;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 -;DXC-NEXT: DESCRIPTORS_VOLATILE: true -;DXC-NEXT: - RangeType: Sampler -;DXC-NEXT: NumDescriptors: 1 -;DXC-NEXT: BaseShaderRegister: 0 -;DXC-NEXT: RegisterSpace: 4 -;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 -;DXC-NEXT: DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS: true ;DXC-NEXT: - RangeType: SRV ;DXC-NEXT: NumDescriptors: 1 ;DXC-NEXT: BaseShaderRegister: 0 @@ -155,3 +139,26 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } ;DXC-NEXT: OffsetInDescriptorsFromTableStart: 5 ;DXC-NEXT: DATA_STATIC_WHILE_SET_AT_EXECUTE: true ;DXC-NEXT: DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS: true +;DXC-NEXT: - ParameterType: DescriptorTable +;DXC-NEXT: ShaderVisibility: All +;DXC-NEXT: Table: +;DXC-NEXT: NumRanges: 3 +;DXC-NEXT: RangesOffset: 328 +;DXC-NEXT: Ranges: +;DXC-NEXT: - RangeType: Sampler +;DXC-NEXT: NumDescriptors: 1 +;DXC-NEXT: BaseShaderRegister: 0 +;DXC-NEXT: RegisterSpace: 1 +;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 +;DXC-NEXT: - RangeType: Sampler +;DXC-NEXT: NumDescriptors: 1 +;DXC-NEXT: BaseShaderRegister: 0 +;DXC-NEXT: RegisterSpace: 3 +;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 +;DXC-NEXT: DESCRIPTORS_VOLATILE: true +;DXC-NEXT: - RangeType: Sampler +;DXC-NEXT: NumDescriptors: 1 +;DXC-NEXT: BaseShaderRegister: 0 +;DXC-NEXT: RegisterSpace: 4 +;DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 +;DXC-NEXT: DESCRIPTORS_STATIC_KEEPING_BUFFER_BOUNDS_CHECKS: true diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinationsV1.ll 
b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinationsV1.ll index c65eab5f4aa5f..098b2d51a0bf4 100644 --- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinationsV1.ll +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-AllValidFlagCombinationsV1.ll @@ -11,33 +11,40 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } !dx.rootsignatures = !{!2} ; list of function/root signature pairs !2 = !{ ptr @main, !3, i32 1 } ; function, root signature -!3 = !{ !5 } ; list of root signature elements -!5 = !{ !"DescriptorTable", i32 0, !6, !7 } +!3 = !{ !5, !8 } ; list of root signature elements +!5 = !{ !"DescriptorTable", i32 0, !6 } !6 = !{ !"Sampler", i32 1, i32 1, i32 0, i32 -1, i32 1 } +!8 = !{ !"DescriptorTable", i32 0, !7 } !7 = !{ !"UAV", i32 5, i32 1, i32 10, i32 5, i32 3 } ; DXC: - Name: RTS0 -; DXC-NEXT: Size: 84 +; DXC-NEXT: Size: 104 ; DXC-NEXT: RootSignature: ; DXC-NEXT: Version: 1 -; DXC-NEXT: NumRootParameters: 1 +; DXC-NEXT: NumRootParameters: 2 ; DXC-NEXT: RootParametersOffset: 24 ; DXC-NEXT: NumStaticSamplers: 0 -; DXC-NEXT: StaticSamplersOffset: 84 +; DXC-NEXT: StaticSamplersOffset: 104 ; DXC-NEXT: Parameters: ; DXC-NEXT: - ParameterType: DescriptorTable ; DXC-NEXT: ShaderVisibility: All ; DXC-NEXT: Table: -; DXC-NEXT: NumRanges: 2 -; DXC-NEXT: RangesOffset: 44 +; DXC-NEXT: NumRanges: 1 +; DXC-NEXT: RangesOffset: 56 ; DXC-NEXT: Ranges: ; DXC-NEXT: - RangeType: Sampler ; DXC-NEXT: NumDescriptors: 1 ; DXC-NEXT: BaseShaderRegister: 1 ; DXC-NEXT: RegisterSpace: 0 ; DXC-NEXT: OffsetInDescriptorsFromTableStart: 4294967295 -; DXC-NEXT: - RangeType: UAV +; DXC-NEXT: - ParameterType: DescriptorTable +; DXC-NEXT: ShaderVisibility: All +; DXC-NEXT: Table: +; DXC-NEXT: NumRanges: 1 +; DXC-NEXT: RangesOffset: 84 +; DXC-NEXT: Ranges: +; DXC-NEXT: - RangeType: UAV ; DXC-NEXT: NumDescriptors: 5 ; DXC-NEXT: BaseShaderRegister: 1 ; DXC-NEXT: 
RegisterSpace: 10 diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Invalid-Version.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Invalid-Version.ll new file mode 100644 index 0000000000000..26867e6d7ec25 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Invalid-Version.ll @@ -0,0 +1,20 @@ +; RUN: not opt -passes='print' %s -S -o - 2>&1 | FileCheck %s + +target triple = "dxil-unknown-shadermodel6.0-compute" + + +; CHECK: error: Invalid value for Version: 4 +; CHECK-NOT: Root Signature Definitions +define void @main() #0 { +entry: + ret void +} +attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" } + + +!dx.rootsignatures = !{!2, !3, !4, !5} ; list of function/root signature pairs +!2 = !{ ptr @main, !6, i32 1 } ; function, root signature +!3 = !{ ptr @main, !6, i32 4 } ; function, root signature +!4 = !{ ptr @main, !6, i32 2 } ; function, root signature +!5 = !{ ptr @main, !6, i32 3 } ; function, root signature +!6 = !{ } ; list of root signature elements diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-appending-limits.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-appending-limits.ll new file mode 100644 index 0000000000000..7fa42e9697898 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-appending-limits.ll @@ -0,0 +1,16 @@ +; RUN: opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; A descriptor range can be placed at UINT_MAX, matching DXC's behaviour +; CHECK-NOT: error: + +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4, !5} +!4 = !{!"UAV", i32 1, i32 1, i32 0, i32 4294967294, i32 0} +!5 = !{!"UAV", i32 1, i32 0, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-deny-no-binding.ll 
b/llvm/test/CodeGen/DirectX/rootsignature-validation-deny-no-binding.ll new file mode 100644 index 0000000000000..15326d438f021 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-deny-no-binding.ll @@ -0,0 +1,17 @@ +; RUN: opt -S -passes='dxil-post-optimization-validation' %s +; This is a valid case where no resource is being used +target triple = "dxil-pc-shadermodel6.6-pixel" + +define void @CSMain() #0 { +entry: + ret void +} +attributes #0 = { noinline nounwind "exp-shader"="cs" "hlsl.numthreads"="1,2,1" "hlsl.shader"="geometry" } + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!2, !3, !4} +!2 = !{!"RootConstants", i32 0, i32 2, i32 0, i32 4} +!3 = !{ !"RootFlags", i32 294 } ; 294 = deny_pixel/hull/vertex/amplification_shader_root_access +!4 = !{ !"RootSRV", i32 0, i32 1, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-limits-multiples.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-limits-multiples.ll new file mode 100644 index 0000000000000..e51f15a1d3fc2 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-limits-multiples.ll @@ -0,0 +1,16 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; CHECK: error: Offset overflow for descriptor range: CBV(register=2, space=0). 
+ +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4, !5, !6} +!4 = !{!"CBV", i32 1, i32 0, i32 0, i32 4294967294, i32 0} +!5 = !{!"CBV", i32 1, i32 1, i32 0, i32 -1, i32 0} +!6 = !{!"CBV", i32 1, i32 2, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-overflow.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-overflow.ll new file mode 100644 index 0000000000000..1bc97d9ae2091 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-appending-overflow.ll @@ -0,0 +1,17 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; This test checks if a resource is implicitly overflowing. That means, it is appending a resource after an unbounded range. + +; CHECK: error: Range UAV(register=0, space=0) cannot be appended after an unbounded range + +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4, !5} +!4 = !{!"UAV", i32 -1, i32 1, i32 0, i32 2, i32 0} +!5 = !{!"UAV", i32 1, i32 0, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-multiple-shader.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-multiple-shader.ll new file mode 100644 index 0000000000000..b11cce694bd25 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-multiple-shader.ll @@ -0,0 +1,20 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' %s 2>&1 | FileCheck %s +; CHECK: error: Shader has root bindings but root signature uses a DENY flag to disallow root binding access to the shader stage. 
+target triple = "dxil-pc-shadermodel6.6-pixel" + +%__cblayout_CB = type <{ float }> + +@CB.str = private unnamed_addr constant [3 x i8] c"CB\00", align 1 + +define void @CSMain() "hlsl.shader"="compute" { +entry: + %CB = tail call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 4, 0)) @llvm.dx.resource.handlefrombinding(i32 0, i32 2, i32 1, i32 0, ptr nonnull @CB.str) + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!2, !3} +!2 = !{!"RootConstants", i32 0, i32 2, i32 0, i32 4} +!3 = !{!"RootFlags", i32 294} ; 294 = deny_pixel/hull/vertex/amplification_shader_root_access diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-root-descriptor.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-root-descriptor.ll new file mode 100644 index 0000000000000..6d323757d5897 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-root-descriptor.ll @@ -0,0 +1,20 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' %s 2>&1 | FileCheck %s + +; CHECK: error: Shader has root bindings but root signature uses a DENY flag to disallow root binding access to the shader stage. 
+target triple = "dxil-pc-shadermodel6.6-pixel" + +@SB.str = private unnamed_addr constant [3 x i8] c"SB\00", align 1 + +define void @CSMain() "hlsl.shader"="pixel" { +entry: + %SB = tail call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr nonnull @SB.str) + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!2, !3} +!2 = !{!"DescriptorTable", i32 0, !4} +!4 = !{!"SRV", i32 1, i32 0, i32 0, i32 -1, i32 4} +!3 = !{!"RootFlags", i32 32} ; 32 = deny_pixel_shader_root_access diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-single-shader.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-single-shader.ll new file mode 100644 index 0000000000000..4e50f50049b0e --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-deny-single-shader.ll @@ -0,0 +1,19 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' %s 2>&1 | FileCheck %s + +; CHECK: error: Shader has root bindings but root signature uses a DENY flag to disallow root binding access to the shader stage. 
+target triple = "dxil-pc-shadermodel6.6-pixel" + +@SB.str = private unnamed_addr constant [3 x i8] c"SB\00", align 1 + +define void @CSMain() "hlsl.shader"="pixel" { +entry: + %SB = tail call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr nonnull @SB.str) + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!2, !3} +!2 = !{!"RootSRV", i32 0, i32 0, i32 0, i32 4} +!3 = !{!"RootFlags", i32 32} ; 32 = deny_pixel_shader_root_access diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-offset-overflow.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-offset-overflow.ll new file mode 100644 index 0000000000000..6e56949562740 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-offset-overflow.ll @@ -0,0 +1,15 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; CHECK: error: Offset overflow for descriptor range: UAV(register=0, space=0). 
+ +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4, !5} +!4 = !{!"UAV", i32 100, i32 0, i32 0, i32 4294967294, i32 0} +!5 = !{!"UAV", i32 1, i32 101, i32 0, i32 10, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-register-overflow.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-register-overflow.ll new file mode 100644 index 0000000000000..bff1727c18924 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-register-overflow.ll @@ -0,0 +1,13 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; CHECK: error: Overflow for shader register range: UAV(register=4294967295, space=0) +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4} +!4 = !{!"UAV", i32 100, i32 4294967295, i32 0, i32 -1, i32 0} diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler-mix.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler-mix.ll new file mode 100644 index 0000000000000..95d00619b02a0 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler-mix.ll @@ -0,0 +1,15 @@ +; RUN: not opt -S -passes='dxil-post-optimization-validation' -mtriple=dxil-pc-shadermodel6.6-compute %s 2>&1 | FileCheck %s +; CHECK: error: Samplers cannot be mixed with other resource types in a descriptor table, UAV(location=0) + +define void @CSMain() "hlsl.shader"="compute" { +entry: + ret void +} + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!3} +!3 = !{!"DescriptorTable", i32 0, !4, !5} +!4 = !{!"UAV", i32 1, i32 0, i32 0, i32 -1, i32 0} +!5 = !{!"Sampler", i32 2, i32 0, i32 0, i32 -1, i32 0} diff --git 
a/llvm/test/CodeGen/DirectX/rootsignature-validation-not-dening-shader.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-not-dening-shader.ll new file mode 100644 index 0000000000000..775fc3512ca84 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-not-dening-shader.ll @@ -0,0 +1,21 @@ +; RUN: opt -S -passes='dxil-post-optimization-validation' %s +; Valid scenario where shader stage is not blocked from accessing root bindings +target triple = "dxil-pc-shadermodel6.6-geometry" + +%__cblayout_CB = type <{ float }> + +@CB.str = private unnamed_addr constant [3 x i8] c"CB\00", align 1 + +define void @CSMain() "hlsl.shader"="geometry" { +entry: + %CB = tail call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 4, 0)) @llvm.dx.resource.handlefrombinding(i32 0, i32 2, i32 1, i32 0, ptr nonnull @CB.str) + ret void +} +attributes #0 = { noinline nounwind "exp-shader"="cs" "hlsl.numthreads"="1,2,1" "hlsl.shader"="geometry" } + +!dx.rootsignatures = !{!0} + +!0 = !{ptr @CSMain, !1, i32 2} +!1 = !{!2, !3} +!2 = !{ !"RootFlags", i32 294 } ; 294 = deny_pixel/hull/vertex/amplification_shader_root_access +!3 = !{ !"RootCBV", i32 0, i32 2, i32 0, i32 0 } diff --git a/llvm/test/CodeGen/LoongArch/lasx/abs.ll b/llvm/test/CodeGen/LoongArch/lasx/abs.ll new file mode 100644 index 0000000000000..e3b0d04d92d75 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/abs.ll @@ -0,0 +1,128 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +define void @vabs_b(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.b $xr1, $xr0 +; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <32 x i8>, ptr %src + %b = tail call <32 x i8> 
@llvm.abs.v32i8(<32 x i8> %a, i1 true) + store <32 x i8> %b, ptr %dst + ret void +} + +define void @vabs_b_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.b $xr1, $xr0 +; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <32 x i8>, ptr %src + %b = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false) + store <32 x i8> %b, ptr %dst + ret void +} + +define void @vabs_h(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.h $xr1, $xr0 +; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i16>, ptr %src + %b = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 true) + store <16 x i16> %b, ptr %dst + ret void +} + +define void @vabs_h_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.h $xr1, $xr0 +; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i16>, ptr %src + %b = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false) + store <16 x i16> %b, ptr %dst + ret void +} + +define void @vabs_w(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.w $xr1, $xr0 +; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i32>, ptr %src + %b = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 true) + store <8 x i32> %b, ptr %dst + ret void +} + +define void @vabs_w_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.w $xr1, $xr0 +; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: 
+ %a = load <8 x i32>, ptr %src + %b = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false) + store <8 x i32> %b, ptr %dst + ret void +} + +define void @vabs_d(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.d $xr1, $xr0 +; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i64>, ptr %src + %b = tail call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 true) + store <4 x i64> %b, ptr %dst + ret void +} + +define void @vabs_d_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvneg.d $xr1, $xr0 +; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i64>, ptr %src + %b = tail call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false) + store <4 x i64> %b, ptr %dst + ret void +} + +declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1) +declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1) +declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1) +declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1) diff --git a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll index 5dfc108620b67..d09ef0e2c6ac0 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll @@ -196,8 +196,7 @@ entry: define void @buildvector_v8f32_const_splat(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v8f32_const_splat: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lu12i.w $a1, 260096 -; CHECK-NEXT: xvreplgr2vr.w $xr0, $a1 +; CHECK-NEXT: xvldi $xr0, -1424 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -207,19 +206,11 @@ entry: ;; Also check buildvector_const_splat_xvldi_1100. 
define void @buildvector_v4f64_const_splat(ptr %dst) nounwind { -; LA32-LABEL: buildvector_v4f64_const_splat: -; LA32: # %bb.0: # %entry -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI14_0) -; LA32-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI14_0) -; LA32-NEXT: xvst $xr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: buildvector_v4f64_const_splat: -; LA64: # %bb.0: # %entry -; LA64-NEXT: lu52i.d $a1, $zero, 1023 -; LA64-NEXT: xvreplgr2vr.d $xr0, $a1 -; LA64-NEXT: xvst $xr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: buildvector_v4f64_const_splat: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -912 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret entry: store <4 x double> , ptr %dst ret void @@ -229,8 +220,7 @@ entry: define void @buildvector_const_splat_xvldi_0001(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_const_splat_xvldi_0001: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: ori $a1, $zero, 768 -; CHECK-NEXT: xvreplgr2vr.w $xr0, $a1 +; CHECK-NEXT: xvldi $xr0, -3837 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -241,8 +231,7 @@ entry: define void @buildvector_const_splat_xvldi_0010(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_const_splat_xvldi_0010: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lu12i.w $a1, 16 -; CHECK-NEXT: xvreplgr2vr.w $xr0, $a1 +; CHECK-NEXT: xvldi $xr0, -3583 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -253,8 +242,7 @@ entry: define void @buildvector_const_splat_xvldi_0011(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_const_splat_xvldi_0011: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lu12i.w $a1, 4096 -; CHECK-NEXT: xvreplgr2vr.w $xr0, $a1 +; CHECK-NEXT: xvldi $xr0, -3327 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -265,8 +253,7 @@ entry: define void @buildvector_const_splat_xvldi_0101(ptr %dst) { ; CHECK-LABEL: buildvector_const_splat_xvldi_0101: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: ori $a1, $zero, 768 -; CHECK-NEXT: xvreplgr2vr.h $xr0, $a1 +; CHECK-NEXT: xvldi $xr0, -2813 ; CHECK-NEXT: xvst $xr0, 
$a0, 0 ; CHECK-NEXT: ret entry: @@ -277,8 +264,7 @@ entry: define void @buildvector_const_splat_xvldi_0110(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_const_splat_xvldi_0110: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: ori $a1, $zero, 1023 -; CHECK-NEXT: xvreplgr2vr.w $xr0, $a1 +; CHECK-NEXT: xvldi $xr0, -2557 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -289,9 +275,7 @@ entry: define void @buildvector_const_splat_xvldi_0111(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_const_splat_xvldi_0111: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lu12i.w $a1, 15 -; CHECK-NEXT: ori $a1, $a1, 4095 -; CHECK-NEXT: xvreplgr2vr.w $xr0, $a1 +; CHECK-NEXT: xvldi $xr0, -2305 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -300,39 +284,22 @@ entry: } define void @buildvector_const_splat_xvldi_1001(ptr %dst) nounwind { -; LA32-LABEL: buildvector_const_splat_xvldi_1001: -; LA32: # %bb.0: # %entry -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI21_0) -; LA32-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI21_0) -; LA32-NEXT: xvst $xr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: buildvector_const_splat_xvldi_1001: -; LA64: # %bb.0: # %entry -; LA64-NEXT: lu12i.w $a1, 15 -; LA64-NEXT: ori $a1, $a1, 4095 -; LA64-NEXT: xvreplgr2vr.d $xr0, $a1 -; LA64-NEXT: xvst $xr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: buildvector_const_splat_xvldi_1001: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -1789 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret entry: store <8 x i32> , ptr %dst ret void } define void @buildvector_const_splat_xvldi_1011(ptr %dst) nounwind { -; LA32-LABEL: buildvector_const_splat_xvldi_1011: -; LA32: # %bb.0: # %entry -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI22_0) -; LA32-NEXT: xvld $xr0, $a1, %pc_lo12(.LCPI22_0) -; LA32-NEXT: xvst $xr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: buildvector_const_splat_xvldi_1011: -; LA64: # %bb.0: # %entry -; LA64-NEXT: lu12i.w $a1, 262144 -; LA64-NEXT: xvreplgr2vr.d $xr0, $a1 -; LA64-NEXT: xvst $xr0, $a0, 0 -; 
LA64-NEXT: ret +; CHECK-LABEL: buildvector_const_splat_xvldi_1011: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvldi $xr0, -1280 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret entry: store <8 x float> , ptr %dst ret void @@ -1626,8 +1593,7 @@ define void @buildvector_v8f32_with_constant(ptr %dst, float %a1, float %a2, flo ; CHECK-NEXT: # kill: def $f2 killed $f2 def $xr2 ; CHECK-NEXT: # kill: def $f1 killed $f1 def $xr1 ; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0 -; CHECK-NEXT: lu12i.w $a1, 262144 -; CHECK-NEXT: xvreplgr2vr.w $xr4, $a1 +; CHECK-NEXT: xvldi $xr4, -3264 ; CHECK-NEXT: xvinsve0.w $xr4, $xr0, 1 ; CHECK-NEXT: xvinsve0.w $xr4, $xr1, 2 ; CHECK-NEXT: xvinsve0.w $xr4, $xr2, 5 diff --git a/llvm/test/CodeGen/LoongArch/lasx/fdiv-reciprocal-estimate.ll b/llvm/test/CodeGen/LoongArch/lasx/fdiv-reciprocal-estimate.ll index 7514dafa8000b..d75985b1ac215 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/fdiv-reciprocal-estimate.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/fdiv-reciprocal-estimate.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA32 -; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx,+frecipe < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lasx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA64 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA64 +; RUN: llc --mtriple=loongarch64 --mattr=+lasx,+frecipe < %s | FileCheck %s define void @fdiv_v8f32(ptr %res, ptr %a0, ptr %a1) nounwind { ; FAULT-LABEL: fdiv_v8f32: @@ -40,35 +40,19 @@ define void @fdiv_v4f64(ptr %res, ptr %a0, ptr %a1) nounwind { ; FAULT-NEXT: xvst $xr0, $a0, 0 ; FAULT-NEXT: ret ; -; LA32-LABEL: fdiv_v4f64: -; LA32: # %bb.0: 
# %entry -; LA32-NEXT: pcalau12i $a3, %pc_hi20(.LCPI1_0) -; LA32-NEXT: xvld $xr0, $a2, 0 -; LA32-NEXT: xvld $xr1, $a3, %pc_lo12(.LCPI1_0) -; LA32-NEXT: xvld $xr2, $a1, 0 -; LA32-NEXT: xvfrecipe.d $xr3, $xr0 -; LA32-NEXT: xvfmadd.d $xr1, $xr0, $xr3, $xr1 -; LA32-NEXT: xvfnmsub.d $xr1, $xr1, $xr3, $xr3 -; LA32-NEXT: xvfmul.d $xr3, $xr2, $xr1 -; LA32-NEXT: xvfnmsub.d $xr0, $xr0, $xr3, $xr2 -; LA32-NEXT: xvfmadd.d $xr0, $xr1, $xr0, $xr3 -; LA32-NEXT: xvst $xr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: fdiv_v4f64: -; LA64: # %bb.0: # %entry -; LA64-NEXT: xvld $xr0, $a2, 0 -; LA64-NEXT: xvld $xr1, $a1, 0 -; LA64-NEXT: lu52i.d $a1, $zero, -1025 -; LA64-NEXT: xvreplgr2vr.d $xr2, $a1 -; LA64-NEXT: xvfrecipe.d $xr3, $xr0 -; LA64-NEXT: xvfmadd.d $xr2, $xr0, $xr3, $xr2 -; LA64-NEXT: xvfnmsub.d $xr2, $xr2, $xr3, $xr3 -; LA64-NEXT: xvfmul.d $xr3, $xr1, $xr2 -; LA64-NEXT: xvfnmsub.d $xr0, $xr0, $xr3, $xr1 -; LA64-NEXT: xvfmadd.d $xr0, $xr2, $xr0, $xr3 -; LA64-NEXT: xvst $xr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: fdiv_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a2, 0 +; CHECK-NEXT: xvld $xr1, $a1, 0 +; CHECK-NEXT: xvfrecipe.d $xr2, $xr0 +; CHECK-NEXT: xvldi $xr3, -784 +; CHECK-NEXT: xvfmadd.d $xr3, $xr0, $xr2, $xr3 +; CHECK-NEXT: xvfnmsub.d $xr2, $xr3, $xr2, $xr2 +; CHECK-NEXT: xvfmul.d $xr3, $xr1, $xr2 +; CHECK-NEXT: xvfnmsub.d $xr0, $xr0, $xr3, $xr1 +; CHECK-NEXT: xvfmadd.d $xr0, $xr2, $xr0, $xr3 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret entry: %v0 = load <4 x double>, ptr %a0 %v1 = load <4 x double>, ptr %a1 @@ -90,8 +74,7 @@ define void @one_fdiv_v8f32(ptr %res, ptr %a0) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvfrecipe.s $xr1, $xr0 -; CHECK-NEXT: lu12i.w $a1, -264192 -; CHECK-NEXT: xvreplgr2vr.w $xr2, $a1 +; CHECK-NEXT: xvldi $xr2, -1296 ; CHECK-NEXT: xvfmadd.s $xr0, $xr0, $xr1, $xr2 ; CHECK-NEXT: xvfnmsub.s $xr0, $xr0, $xr1, $xr1 ; CHECK-NEXT: xvst $xr0, $a0, 0 @@ -107,24 +90,22 @@ define void 
@one_fdiv_v4f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA32-LABEL: one_fdiv_v4f64: ; FAULT-LA32: # %bb.0: # %entry ; FAULT-LA32-NEXT: xvld $xr0, $a1, 0 -; FAULT-LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; FAULT-LA32-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI3_0) +; FAULT-LA32-NEXT: xvldi $xr1, -912 ; FAULT-LA32-NEXT: xvfdiv.d $xr0, $xr1, $xr0 ; FAULT-LA32-NEXT: xvst $xr0, $a0, 0 ; FAULT-LA32-NEXT: ret ; -; LA32-LABEL: one_fdiv_v4f64: -; LA32: # %bb.0: # %entry -; LA32-NEXT: xvld $xr0, $a1, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI3_0) -; LA32-NEXT: xvfrecipe.d $xr2, $xr0 -; LA32-NEXT: xvfnmsub.d $xr3, $xr0, $xr2, $xr1 -; LA32-NEXT: xvfmadd.d $xr2, $xr2, $xr3, $xr2 -; LA32-NEXT: xvfnmsub.d $xr0, $xr0, $xr2, $xr1 -; LA32-NEXT: xvfmadd.d $xr0, $xr2, $xr0, $xr2 -; LA32-NEXT: xvst $xr0, $a0, 0 -; LA32-NEXT: ret +; CHECK-LABEL: one_fdiv_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvfrecipe.d $xr1, $xr0 +; CHECK-NEXT: xvldi $xr2, -912 +; CHECK-NEXT: xvfnmsub.d $xr3, $xr0, $xr1, $xr2 +; CHECK-NEXT: xvfmadd.d $xr1, $xr1, $xr3, $xr1 +; CHECK-NEXT: xvfnmsub.d $xr0, $xr0, $xr1, $xr2 +; CHECK-NEXT: xvfmadd.d $xr0, $xr1, $xr0, $xr1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret ; ; FAULT-LA64-LABEL: one_fdiv_v4f64: ; FAULT-LA64: # %bb.0: # %entry @@ -132,19 +113,6 @@ define void @one_fdiv_v4f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA64-NEXT: xvfrecip.d $xr0, $xr0 ; FAULT-LA64-NEXT: xvst $xr0, $a0, 0 ; FAULT-LA64-NEXT: ret -; -; LA64-LABEL: one_fdiv_v4f64: -; LA64: # %bb.0: # %entry -; LA64-NEXT: xvld $xr0, $a1, 0 -; LA64-NEXT: xvfrecipe.d $xr1, $xr0 -; LA64-NEXT: lu52i.d $a1, $zero, 1023 -; LA64-NEXT: xvreplgr2vr.d $xr2, $a1 -; LA64-NEXT: xvfnmsub.d $xr3, $xr0, $xr1, $xr2 -; LA64-NEXT: xvfmadd.d $xr1, $xr1, $xr3, $xr1 -; LA64-NEXT: xvfnmsub.d $xr0, $xr0, $xr1, $xr2 -; LA64-NEXT: xvfmadd.d $xr0, $xr1, $xr0, $xr1 -; LA64-NEXT: xvst $xr0, $a0, 0 -; LA64-NEXT: ret entry: %v0 = load <4 x 
double>, ptr %a0 %div = fdiv fast <4 x double> , %v0 diff --git a/llvm/test/CodeGen/LoongArch/lasx/fsqrt-reciprocal-estimate.ll b/llvm/test/CodeGen/LoongArch/lasx/fsqrt-reciprocal-estimate.ll index 4e475daa8ced3..e696129acb862 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/fsqrt-reciprocal-estimate.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/fsqrt-reciprocal-estimate.ll @@ -63,11 +63,9 @@ define void @one_div_sqrt_v8f32(ptr %res, ptr %a0) nounwind { ; LA32-NEXT: xvfrsqrte.s $xr1, $xr0 ; LA32-NEXT: xvfmul.s $xr1, $xr0, $xr1 ; LA32-NEXT: xvfmul.s $xr0, $xr0, $xr1 -; LA32-NEXT: lu12i.w $a1, -261120 -; LA32-NEXT: xvreplgr2vr.w $xr2, $a1 +; LA32-NEXT: xvldi $xr2, -1400 ; LA32-NEXT: xvfmadd.s $xr0, $xr0, $xr1, $xr2 -; LA32-NEXT: lu12i.w $a1, -266240 -; LA32-NEXT: xvreplgr2vr.w $xr2, $a1 +; LA32-NEXT: xvldi $xr2, -3137 ; LA32-NEXT: xvfmul.s $xr1, $xr1, $xr2 ; LA32-NEXT: xvfmul.s $xr0, $xr1, $xr0 ; LA32-NEXT: xvst $xr0, $sp, 64 @@ -100,11 +98,9 @@ define void @one_div_sqrt_v8f32(ptr %res, ptr %a0) nounwind { ; LA64-NEXT: xvfrsqrte.s $xr1, $xr0 ; LA64-NEXT: xvfmul.s $xr1, $xr0, $xr1 ; LA64-NEXT: xvfmul.s $xr0, $xr0, $xr1 -; LA64-NEXT: lu12i.w $a1, -261120 -; LA64-NEXT: xvreplgr2vr.w $xr2, $a1 +; LA64-NEXT: xvldi $xr2, -1400 ; LA64-NEXT: xvfmadd.s $xr0, $xr0, $xr1, $xr2 -; LA64-NEXT: lu12i.w $a1, -266240 -; LA64-NEXT: xvreplgr2vr.w $xr2, $a1 +; LA64-NEXT: xvldi $xr2, -3137 ; LA64-NEXT: xvfmul.s $xr1, $xr1, $xr2 ; LA64-NEXT: xvfmul.s $xr0, $xr1, $xr0 ; LA64-NEXT: xvst $xr0, $a0, 0 @@ -136,9 +132,8 @@ define void @one_div_sqrt_v4f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA32-NEXT: ld.w $a1, $a1, 0 ; FAULT-LA32-NEXT: st.w $a1, $sp, 32 ; FAULT-LA32-NEXT: xvld $xr0, $sp, 32 -; FAULT-LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_0) -; FAULT-LA32-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI1_0) ; FAULT-LA32-NEXT: xvfsqrt.d $xr0, $xr0 +; FAULT-LA32-NEXT: xvldi $xr1, -912 ; FAULT-LA32-NEXT: xvfdiv.d $xr0, $xr1, $xr0 ; FAULT-LA32-NEXT: xvst $xr0, $sp, 64 ; FAULT-LA32-NEXT: vld $vr0, $sp, 80 @@ 
-176,18 +171,16 @@ define void @one_div_sqrt_v4f64(ptr %res, ptr %a0) nounwind { ; LA32-NEXT: st.w $a1, $sp, 32 ; LA32-NEXT: xvld $xr0, $sp, 32 ; LA32-NEXT: xvfrsqrte.d $xr1, $xr0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_0) -; LA32-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI1_0) -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_1) -; LA32-NEXT: xvld $xr3, $a1, %pc_lo12(.LCPI1_1) ; LA32-NEXT: xvfmul.d $xr1, $xr0, $xr1 -; LA32-NEXT: xvfmul.d $xr4, $xr0, $xr1 -; LA32-NEXT: xvfmadd.d $xr4, $xr4, $xr1, $xr2 -; LA32-NEXT: xvfmul.d $xr1, $xr1, $xr3 +; LA32-NEXT: xvfmul.d $xr2, $xr0, $xr1 +; LA32-NEXT: xvldi $xr3, -888 +; LA32-NEXT: xvfmadd.d $xr2, $xr2, $xr1, $xr3 +; LA32-NEXT: xvldi $xr4, -800 ; LA32-NEXT: xvfmul.d $xr1, $xr1, $xr4 +; LA32-NEXT: xvfmul.d $xr1, $xr1, $xr2 ; LA32-NEXT: xvfmul.d $xr0, $xr0, $xr1 -; LA32-NEXT: xvfmadd.d $xr0, $xr0, $xr1, $xr2 -; LA32-NEXT: xvfmul.d $xr1, $xr1, $xr3 +; LA32-NEXT: xvfmadd.d $xr0, $xr0, $xr1, $xr3 +; LA32-NEXT: xvfmul.d $xr1, $xr1, $xr4 ; LA32-NEXT: xvfmul.d $xr0, $xr1, $xr0 ; LA32-NEXT: xvst $xr0, $sp, 64 ; LA32-NEXT: vld $vr0, $sp, 80 @@ -219,13 +212,9 @@ define void @one_div_sqrt_v4f64(ptr %res, ptr %a0) nounwind { ; LA64-NEXT: xvfrsqrte.d $xr1, $xr0 ; LA64-NEXT: xvfmul.d $xr1, $xr0, $xr1 ; LA64-NEXT: xvfmul.d $xr2, $xr0, $xr1 -; LA64-NEXT: ori $a1, $zero, 0 -; LA64-NEXT: lu32i.d $a1, -524288 -; LA64-NEXT: lu52i.d $a1, $a1, -1024 -; LA64-NEXT: xvreplgr2vr.d $xr3, $a1 +; LA64-NEXT: xvldi $xr3, -888 ; LA64-NEXT: xvfmadd.d $xr2, $xr2, $xr1, $xr3 -; LA64-NEXT: lu52i.d $a1, $zero, -1026 -; LA64-NEXT: xvreplgr2vr.d $xr4, $a1 +; LA64-NEXT: xvldi $xr4, -800 ; LA64-NEXT: xvfmul.d $xr1, $xr1, $xr4 ; LA64-NEXT: xvfmul.d $xr1, $xr1, $xr2 ; LA64-NEXT: xvfmul.d $xr0, $xr0, $xr1 diff --git a/llvm/test/CodeGen/LoongArch/lasx/fsqrt.ll b/llvm/test/CodeGen/LoongArch/lasx/fsqrt.ll index f8a3284f04dc8..9ae651d612f18 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/fsqrt.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/fsqrt.ll @@ -174,9 +174,8 @@ define void 
@one_div_sqrt_v4f64(ptr %res, ptr %a0) nounwind { ; LA32-NEXT: ld.w $a1, $a1, 0 ; LA32-NEXT: st.w $a1, $sp, 32 ; LA32-NEXT: xvld $xr0, $sp, 32 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI3_0) ; LA32-NEXT: xvfsqrt.d $xr0, $xr0 +; LA32-NEXT: xvldi $xr1, -912 ; LA32-NEXT: xvfdiv.d $xr0, $xr1, $xr0 ; LA32-NEXT: xvst $xr0, $sp, 64 ; LA32-NEXT: vld $vr0, $sp, 80 diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll index e66a15291fb18..98687755fcfb4 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/adda.ll @@ -7,11 +7,7 @@ define void @vadda_b(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvld $xr1, $a2, 0 -; CHECK-NEXT: xvneg.b $xr2, $xr0 -; CHECK-NEXT: xvmax.b $xr0, $xr0, $xr2 -; CHECK-NEXT: xvneg.b $xr2, $xr1 -; CHECK-NEXT: xvmax.b $xr1, $xr1, $xr2 -; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1 +; CHECK-NEXT: xvadda.b $xr0, $xr0, $xr1 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -33,11 +29,7 @@ define void @vadda_h(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvld $xr1, $a2, 0 -; CHECK-NEXT: xvneg.h $xr2, $xr0 -; CHECK-NEXT: xvmax.h $xr0, $xr0, $xr2 -; CHECK-NEXT: xvneg.h $xr2, $xr1 -; CHECK-NEXT: xvmax.h $xr1, $xr1, $xr2 -; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1 +; CHECK-NEXT: xvadda.h $xr0, $xr0, $xr1 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -59,11 +51,7 @@ define void @vadda_w(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvld $xr1, $a2, 0 -; CHECK-NEXT: xvneg.w $xr2, $xr0 -; CHECK-NEXT: xvmax.w $xr0, $xr0, $xr2 -; CHECK-NEXT: xvneg.w $xr2, $xr1 -; CHECK-NEXT: xvmax.w $xr1, $xr1, $xr2 -; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1 +; CHECK-NEXT: xvadda.w $xr0, $xr0, 
$xr1 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -85,11 +73,7 @@ define void @vadda_d(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xvld $xr0, $a1, 0 ; CHECK-NEXT: xvld $xr1, $a2, 0 -; CHECK-NEXT: xvneg.d $xr2, $xr0 -; CHECK-NEXT: xvmax.d $xr0, $xr0, $xr2 -; CHECK-NEXT: xvneg.d $xr2, $xr1 -; CHECK-NEXT: xvmax.d $xr1, $xr1, $xr2 -; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1 +; CHECK-NEXT: xvadda.d $xr0, $xr0, $xr1 ; CHECK-NEXT: xvst $xr0, $a0, 0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll index ae6f091ddb498..aefaa0efb079c 100644 --- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fdiv.ll @@ -53,8 +53,7 @@ define void @one_fdiv_v4f64(ptr %res, ptr %a0) nounwind { ; LA32-LABEL: one_fdiv_v4f64: ; LA32: # %bb.0: # %entry ; LA32-NEXT: xvld $xr0, $a1, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: xvld $xr1, $a1, %pc_lo12(.LCPI3_0) +; LA32-NEXT: xvldi $xr1, -912 ; LA32-NEXT: xvfdiv.d $xr0, $xr1, $xr0 ; LA32-NEXT: xvst $xr0, $a0, 0 ; LA32-NEXT: ret diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll new file mode 100644 index 0000000000000..e1784f81c2a07 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll @@ -0,0 +1,197 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +;; xvinsve0.w +define void @xvinsve0_v8i32_l_0(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_l_0: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, 
$xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_l_4(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_l_4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 4 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8f32_l(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8f32_l: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x float>, ptr %a + %vb = load <8 x float>, ptr %b + %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> + store <8 x float> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_h_1(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_h_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 1 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_h_6(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_h_6: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x 
i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8f32_h(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8f32_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x float>, ptr %a + %vb = load <8 x float>, ptr %b + %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> + store <8 x float> %vc, ptr %d + ret void +} + +;; xvinsve0.d +define void @xvinsve0_v4i64_l_1(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_l_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_l_2(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_l_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4f64_l(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4f64_l: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x double>, ptr %a + %vb = load <4 x double>, ptr %b + %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> + store <4 x double> 
%vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_h_0(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_h_0: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_h_2(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_h_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4f64_h(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4f64_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x double>, ptr %a + %vb = load <4 x double>, ptr %b + %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> + store <4 x double> %vc, ptr %d + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll new file mode 100644 index 0000000000000..39ac647d6875c --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll @@ -0,0 +1,58 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s + +define <8 x float> @fadd_elt0_v8f32(float %a) nounwind { +; 
CHECK-LABEL: fadd_elt0_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -1168 +; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <8 x float> poison, float %a, i32 0 + %c = fadd <8 x float> %b, + ret <8 x float> %c +} + +define <4 x double> @fadd_elt0_v4f64(double %a) nounwind { +; CHECK-LABEL: fadd_elt0_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -912 +; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <4 x double> poison, double %a, i32 0 + %c = fadd <4 x double> %b, + ret <4 x double> %c +} + +define <8 x float> @fsub_splat_v8f32(float %a, float %b) nounwind { +; CHECK-LABEL: fsub_splat_v8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsub.s $fa0, $fa0, $fa1 +; CHECK-NEXT: xvreplve0.w $xr0, $xr0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <8 x float> poison, float %a, i32 0 + %insb = insertelement <8 x float> poison, float %b, i32 0 + %va = shufflevector <8 x float> %insa, <8 x float> poison, <8 x i32> zeroinitializer + %vb = shufflevector <8 x float> %insb, <8 x float> poison, <8 x i32> zeroinitializer + %c = fsub <8 x float> %va, %vb + ret <8 x float> %c +} + +define <4 x double> @fsub_splat_v4f64(double %a) nounwind { +; CHECK-LABEL: fsub_splat_v4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -784 +; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; CHECK-NEXT: xvreplve0.d $xr0, $xr0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <4 x double> poison, double %a, i32 0 + %insb = insertelement <4 x double> poison, double 1.0, i32 0 + %va = shufflevector <4 x double> %insa, <4 x double> poison, <4 x i32> zeroinitializer + %vb = shufflevector <4 x double> %insb, <4 x double> poison, <4 x i32> zeroinitializer + %c = fsub <4 x double> %va, %vb + ret <4 x double> %c +} diff --git a/llvm/test/CodeGen/LoongArch/lasx/vselect.ll b/llvm/test/CodeGen/LoongArch/lasx/vselect.ll index 44e4f71c8d08d..bf31ccb1d0104 100644 --- 
a/llvm/test/CodeGen/LoongArch/lasx/vselect.ll +++ b/llvm/test/CodeGen/LoongArch/lasx/vselect.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA32 -; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s --check-prefixes=CHECK,LA64 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s define void @select_v32i8_imm(ptr %res, ptr %a0) nounwind { ; CHECK-LABEL: select_v32i8_imm: @@ -50,26 +50,14 @@ define void @select_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind { } define void @select_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind { -; LA32-LABEL: select_v8i32: -; LA32: # %bb.0: -; LA32-NEXT: xvld $xr0, $a1, 0 -; LA32-NEXT: xvld $xr1, $a2, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: xvld $xr2, $a1, %pc_lo12(.LCPI3_0) -; LA32-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 -; LA32-NEXT: xvst $xr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: select_v8i32: -; LA64: # %bb.0: -; LA64-NEXT: xvld $xr0, $a1, 0 -; LA64-NEXT: xvld $xr1, $a2, 0 -; LA64-NEXT: ori $a1, $zero, 0 -; LA64-NEXT: lu32i.d $a1, -1 -; LA64-NEXT: xvreplgr2vr.d $xr2, $a1 -; LA64-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 -; LA64-NEXT: xvst $xr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: select_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvldi $xr2, -1552 +; CHECK-NEXT: xvbitsel.v $xr0, $xr1, $xr0, $xr2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret %v0 = load <8 x i32>, ptr %a0 %v1 = load <8 x i32>, ptr %a1 %sel = select <8 x i1> , <8 x i32> %v0, <8 x i32> %v1 diff --git a/llvm/test/CodeGen/LoongArch/lsx/abs.ll b/llvm/test/CodeGen/LoongArch/lsx/abs.ll new file mode 100644 index 0000000000000..85fe1fe5c0da7 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/abs.ll @@ -0,0 +1,128 @@ +; NOTE: 
Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s + +define void @vabs_b(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.b $vr1, $vr0 +; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i8>, ptr %src + %b = tail call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 true) + store <16 x i8> %b, ptr %dst + ret void +} + +define void @vabs_b_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_b_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.b $vr1, $vr0 +; CHECK-NEXT: vmax.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <16 x i8>, ptr %src + %b = tail call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false) + store <16 x i8> %b, ptr %dst + ret void +} + +define void @vabs_h(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.h $vr1, $vr0 +; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i16>, ptr %src + %b = tail call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 true) + store <8 x i16> %b, ptr %dst + ret void +} + +define void @vabs_h_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_h_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.h $vr1, $vr0 +; CHECK-NEXT: vmax.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <8 x i16>, ptr %src + %b = tail call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false) + store <8 x i16> %b, ptr %dst + ret void +} + +define void @vabs_w(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: 
vneg.w $vr1, $vr0 +; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i32>, ptr %src + %b = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 true) + store <4 x i32> %b, ptr %dst + ret void +} + +define void @vabs_w_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_w_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.w $vr1, $vr0 +; CHECK-NEXT: vmax.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <4 x i32>, ptr %src + %b = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false) + store <4 x i32> %b, ptr %dst + ret void +} + +define void @vabs_d(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.d $vr1, $vr0 +; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <2 x i64>, ptr %src + %b = tail call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 true) + store <2 x i64> %b, ptr %dst + ret void +} + +define void @vabs_d_1(ptr %dst, ptr %src) { +; CHECK-LABEL: vabs_d_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vneg.d $vr1, $vr0 +; CHECK-NEXT: vmax.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %a = load <2 x i64>, ptr %src + %b = tail call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false) + store <2 x i64> %b, ptr %dst + ret void +} + +declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1) +declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1) +declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1) +declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1) diff --git a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll index 373f2b19c2a46..fe45e73b36f51 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll @@ -196,8 +196,7 @@ entry: define void 
@buildvector_v4f32_const_splat(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_v4f32_const_splat: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lu12i.w $a1, 260096 -; CHECK-NEXT: vreplgr2vr.w $vr0, $a1 +; CHECK-NEXT: vldi $vr0, -1424 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -207,19 +206,11 @@ entry: ;; Also check buildvector_const_splat_vldi_1100. define void @buildvector_v2f64_const_splat(ptr %dst) nounwind { -; LA32-LABEL: buildvector_v2f64_const_splat: -; LA32: # %bb.0: # %entry -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI14_0) -; LA32-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI14_0) -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: buildvector_v2f64_const_splat: -; LA64: # %bb.0: # %entry -; LA64-NEXT: lu52i.d $a1, $zero, 1023 -; LA64-NEXT: vreplgr2vr.d $vr0, $a1 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: buildvector_v2f64_const_splat: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -912 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret entry: store <2 x double> , ptr %dst ret void @@ -229,8 +220,7 @@ entry: define void @buildvector_const_splat_vldi_0001(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_const_splat_vldi_0001: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: ori $a1, $zero, 768 -; CHECK-NEXT: vreplgr2vr.w $vr0, $a1 +; CHECK-NEXT: vldi $vr0, -3837 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -241,8 +231,7 @@ entry: define void @buildvector_const_splat_vldi_0010(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_const_splat_vldi_0010: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lu12i.w $a1, 16 -; CHECK-NEXT: vreplgr2vr.w $vr0, $a1 +; CHECK-NEXT: vldi $vr0, -3583 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -253,8 +242,7 @@ entry: define void @buildvector_const_splat_vldi_0011(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_const_splat_vldi_0011: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lu12i.w $a1, 4096 -; CHECK-NEXT: vreplgr2vr.w $vr0, $a1 +; CHECK-NEXT: vldi $vr0, -3327 ; 
CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -265,8 +253,7 @@ entry: define void @buildvector_const_splat_vldi_0101(ptr %dst) { ; CHECK-LABEL: buildvector_const_splat_vldi_0101: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: ori $a1, $zero, 768 -; CHECK-NEXT: vreplgr2vr.h $vr0, $a1 +; CHECK-NEXT: vldi $vr0, -2813 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -277,8 +264,7 @@ entry: define void @buildvector_const_splat_vldi_0110(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_const_splat_vldi_0110: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: ori $a1, $zero, 1023 -; CHECK-NEXT: vreplgr2vr.w $vr0, $a1 +; CHECK-NEXT: vldi $vr0, -2557 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -289,9 +275,7 @@ entry: define void @buildvector_const_splat_vldi_0111(ptr %dst) nounwind { ; CHECK-LABEL: buildvector_const_splat_vldi_0111: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: lu12i.w $a1, 15 -; CHECK-NEXT: ori $a1, $a1, 4095 -; CHECK-NEXT: vreplgr2vr.w $vr0, $a1 +; CHECK-NEXT: vldi $vr0, -2305 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -300,39 +284,22 @@ entry: } define void @buildvector_const_splat_vldi_1001(ptr %dst) nounwind { -; LA32-LABEL: buildvector_const_splat_vldi_1001: -; LA32: # %bb.0: # %entry -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI21_0) -; LA32-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI21_0) -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: buildvector_const_splat_vldi_1001: -; LA64: # %bb.0: # %entry -; LA64-NEXT: lu12i.w $a1, 15 -; LA64-NEXT: ori $a1, $a1, 4095 -; LA64-NEXT: vreplgr2vr.d $vr0, $a1 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: buildvector_const_splat_vldi_1001: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -1789 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret entry: store <4 x i32> , ptr %dst ret void } define void @buildvector_const_splat_vldi_1011(ptr %dst) nounwind { -; LA32-LABEL: buildvector_const_splat_vldi_1011: -; LA32: # %bb.0: # %entry -; LA32-NEXT: 
pcalau12i $a1, %pc_hi20(.LCPI22_0) -; LA32-NEXT: vld $vr0, $a1, %pc_lo12(.LCPI22_0) -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: buildvector_const_splat_vldi_1011: -; LA64: # %bb.0: # %entry -; LA64-NEXT: lu12i.w $a1, 262144 -; LA64-NEXT: vreplgr2vr.d $vr0, $a1 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: buildvector_const_splat_vldi_1011: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr0, -1280 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret entry: store <4 x float> , ptr %dst ret void diff --git a/llvm/test/CodeGen/LoongArch/lsx/fdiv-reciprocal-estimate.ll b/llvm/test/CodeGen/LoongArch/lsx/fdiv-reciprocal-estimate.ll index 58e16d37ae278..46eb91e4079bf 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/fdiv-reciprocal-estimate.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/fdiv-reciprocal-estimate.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 ; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA32 -; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,+frecipe < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA64 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA64 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx,+frecipe < %s | FileCheck %s define void @fdiv_v4f32(ptr %res, ptr %a0, ptr %a1) nounwind { ; FAULT-LABEL: fdiv_v4f32: @@ -40,35 +40,19 @@ define void @fdiv_v2f64(ptr %res, ptr %a0, ptr %a1) nounwind { ; FAULT-NEXT: vst $vr0, $a0, 0 ; FAULT-NEXT: ret ; -; LA32-LABEL: fdiv_v2f64: -; LA32: # %bb.0: # %entry -; LA32-NEXT: pcalau12i $a3, %pc_hi20(.LCPI1_0) -; LA32-NEXT: vld $vr0, $a2, 0 -; LA32-NEXT: vld $vr1, $a3, %pc_lo12(.LCPI1_0) -; LA32-NEXT: vld $vr2, $a1, 0 -; 
LA32-NEXT: vfrecipe.d $vr3, $vr0 -; LA32-NEXT: vfmadd.d $vr1, $vr0, $vr3, $vr1 -; LA32-NEXT: vfnmsub.d $vr1, $vr1, $vr3, $vr3 -; LA32-NEXT: vfmul.d $vr3, $vr2, $vr1 -; LA32-NEXT: vfnmsub.d $vr0, $vr0, $vr3, $vr2 -; LA32-NEXT: vfmadd.d $vr0, $vr1, $vr0, $vr3 -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: fdiv_v2f64: -; LA64: # %bb.0: # %entry -; LA64-NEXT: vld $vr0, $a2, 0 -; LA64-NEXT: vld $vr1, $a1, 0 -; LA64-NEXT: lu52i.d $a1, $zero, -1025 -; LA64-NEXT: vreplgr2vr.d $vr2, $a1 -; LA64-NEXT: vfrecipe.d $vr3, $vr0 -; LA64-NEXT: vfmadd.d $vr2, $vr0, $vr3, $vr2 -; LA64-NEXT: vfnmsub.d $vr2, $vr2, $vr3, $vr3 -; LA64-NEXT: vfmul.d $vr3, $vr1, $vr2 -; LA64-NEXT: vfnmsub.d $vr0, $vr0, $vr3, $vr1 -; LA64-NEXT: vfmadd.d $vr0, $vr2, $vr0, $vr3 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: fdiv_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a2, 0 +; CHECK-NEXT: vld $vr1, $a1, 0 +; CHECK-NEXT: vfrecipe.d $vr2, $vr0 +; CHECK-NEXT: vldi $vr3, -784 +; CHECK-NEXT: vfmadd.d $vr3, $vr0, $vr2, $vr3 +; CHECK-NEXT: vfnmsub.d $vr2, $vr3, $vr2, $vr2 +; CHECK-NEXT: vfmul.d $vr3, $vr1, $vr2 +; CHECK-NEXT: vfnmsub.d $vr0, $vr0, $vr3, $vr1 +; CHECK-NEXT: vfmadd.d $vr0, $vr2, $vr0, $vr3 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret entry: %v0 = load <2 x double>, ptr %a0 %v1 = load <2 x double>, ptr %a1 @@ -90,8 +74,7 @@ define void @one_fdiv_v4f32(ptr %res, ptr %a0) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vfrecipe.s $vr1, $vr0 -; CHECK-NEXT: lu12i.w $a1, -264192 -; CHECK-NEXT: vreplgr2vr.w $vr2, $a1 +; CHECK-NEXT: vldi $vr2, -1296 ; CHECK-NEXT: vfmadd.s $vr0, $vr0, $vr1, $vr2 ; CHECK-NEXT: vfnmsub.s $vr0, $vr0, $vr1, $vr1 ; CHECK-NEXT: vst $vr0, $a0, 0 @@ -107,24 +90,22 @@ define void @one_fdiv_v2f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA32-LABEL: one_fdiv_v2f64: ; FAULT-LA32: # %bb.0: # %entry ; FAULT-LA32-NEXT: vld $vr0, $a1, 0 -; FAULT-LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; 
FAULT-LA32-NEXT: vld $vr1, $a1, %pc_lo12(.LCPI3_0) +; FAULT-LA32-NEXT: vldi $vr1, -912 ; FAULT-LA32-NEXT: vfdiv.d $vr0, $vr1, $vr0 ; FAULT-LA32-NEXT: vst $vr0, $a0, 0 ; FAULT-LA32-NEXT: ret ; -; LA32-LABEL: one_fdiv_v2f64: -; LA32: # %bb.0: # %entry -; LA32-NEXT: vld $vr0, $a1, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: vld $vr1, $a1, %pc_lo12(.LCPI3_0) -; LA32-NEXT: vfrecipe.d $vr2, $vr0 -; LA32-NEXT: vfnmsub.d $vr3, $vr0, $vr2, $vr1 -; LA32-NEXT: vfmadd.d $vr2, $vr2, $vr3, $vr2 -; LA32-NEXT: vfnmsub.d $vr0, $vr0, $vr2, $vr1 -; LA32-NEXT: vfmadd.d $vr0, $vr2, $vr0, $vr2 -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret +; CHECK-LABEL: one_fdiv_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vfrecipe.d $vr1, $vr0 +; CHECK-NEXT: vldi $vr2, -912 +; CHECK-NEXT: vfnmsub.d $vr3, $vr0, $vr1, $vr2 +; CHECK-NEXT: vfmadd.d $vr1, $vr1, $vr3, $vr1 +; CHECK-NEXT: vfnmsub.d $vr0, $vr0, $vr1, $vr2 +; CHECK-NEXT: vfmadd.d $vr0, $vr1, $vr0, $vr1 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret ; ; FAULT-LA64-LABEL: one_fdiv_v2f64: ; FAULT-LA64: # %bb.0: # %entry @@ -132,19 +113,6 @@ define void @one_fdiv_v2f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA64-NEXT: vfrecip.d $vr0, $vr0 ; FAULT-LA64-NEXT: vst $vr0, $a0, 0 ; FAULT-LA64-NEXT: ret -; -; LA64-LABEL: one_fdiv_v2f64: -; LA64: # %bb.0: # %entry -; LA64-NEXT: vld $vr0, $a1, 0 -; LA64-NEXT: vfrecipe.d $vr1, $vr0 -; LA64-NEXT: lu52i.d $a1, $zero, 1023 -; LA64-NEXT: vreplgr2vr.d $vr2, $a1 -; LA64-NEXT: vfnmsub.d $vr3, $vr0, $vr1, $vr2 -; LA64-NEXT: vfmadd.d $vr1, $vr1, $vr3, $vr1 -; LA64-NEXT: vfnmsub.d $vr0, $vr0, $vr1, $vr2 -; LA64-NEXT: vfmadd.d $vr0, $vr1, $vr0, $vr1 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret entry: %v0 = load <2 x double>, ptr %a0 %div = fdiv fast <2 x double> , %v0 diff --git a/llvm/test/CodeGen/LoongArch/lsx/fsqrt-reciprocal-estimate.ll b/llvm/test/CodeGen/LoongArch/lsx/fsqrt-reciprocal-estimate.ll index 1f744830bd56b..4951696e05a94 100644 --- 
a/llvm/test/CodeGen/LoongArch/lsx/fsqrt-reciprocal-estimate.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/fsqrt-reciprocal-estimate.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA32 -; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA32 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx,+frecipe < %s | FileCheck %s ; RUN: llc --mtriple=loongarch64 --mattr=+lsx,-frecipe < %s | FileCheck %s --check-prefixes=FAULT,FAULT-LA64 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx,+frecipe < %s | FileCheck %s --check-prefixes=CHECK,LA64 +; RUN: llc --mtriple=loongarch64 --mattr=+lsx,+frecipe < %s | FileCheck %s ;; 1.0 / (fsqrt vec) define void @one_div_sqrt_v4f32(ptr %res, ptr %a0) nounwind { @@ -19,11 +19,9 @@ define void @one_div_sqrt_v4f32(ptr %res, ptr %a0) nounwind { ; CHECK-NEXT: vfrsqrte.s $vr1, $vr0 ; CHECK-NEXT: vfmul.s $vr1, $vr0, $vr1 ; CHECK-NEXT: vfmul.s $vr0, $vr0, $vr1 -; CHECK-NEXT: lu12i.w $a1, -261120 -; CHECK-NEXT: vreplgr2vr.w $vr2, $a1 +; CHECK-NEXT: vldi $vr2, -1400 ; CHECK-NEXT: vfmadd.s $vr0, $vr0, $vr1, $vr2 -; CHECK-NEXT: lu12i.w $a1, -266240 -; CHECK-NEXT: vreplgr2vr.w $vr2, $a1 +; CHECK-NEXT: vldi $vr2, -3137 ; CHECK-NEXT: vfmul.s $vr1, $vr1, $vr2 ; CHECK-NEXT: vfmul.s $vr0, $vr1, $vr0 ; CHECK-NEXT: vst $vr0, $a0, 0 @@ -40,32 +38,29 @@ define void @one_div_sqrt_v2f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA32-LABEL: one_div_sqrt_v2f64: ; FAULT-LA32: # %bb.0: # %entry ; FAULT-LA32-NEXT: vld $vr0, $a1, 0 -; FAULT-LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_0) -; FAULT-LA32-NEXT: vld $vr1, $a1, %pc_lo12(.LCPI1_0) ; FAULT-LA32-NEXT: vfsqrt.d $vr0, $vr0 +; FAULT-LA32-NEXT: vldi $vr1, -912 ; FAULT-LA32-NEXT: vfdiv.d $vr0, $vr1, $vr0 ; FAULT-LA32-NEXT: vst $vr0, $a0, 0 ; FAULT-LA32-NEXT: ret ; -; LA32-LABEL: one_div_sqrt_v2f64: -; LA32: # %bb.0: # 
%entry -; LA32-NEXT: vld $vr0, $a1, 0 -; LA32-NEXT: vfrsqrte.d $vr1, $vr0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_0) -; LA32-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI1_0) -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_1) -; LA32-NEXT: vld $vr3, $a1, %pc_lo12(.LCPI1_1) -; LA32-NEXT: vfmul.d $vr1, $vr0, $vr1 -; LA32-NEXT: vfmul.d $vr4, $vr0, $vr1 -; LA32-NEXT: vfmadd.d $vr4, $vr4, $vr1, $vr2 -; LA32-NEXT: vfmul.d $vr1, $vr1, $vr3 -; LA32-NEXT: vfmul.d $vr1, $vr1, $vr4 -; LA32-NEXT: vfmul.d $vr0, $vr0, $vr1 -; LA32-NEXT: vfmadd.d $vr0, $vr0, $vr1, $vr2 -; LA32-NEXT: vfmul.d $vr1, $vr1, $vr3 -; LA32-NEXT: vfmul.d $vr0, $vr1, $vr0 -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret +; CHECK-LABEL: one_div_sqrt_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vfrsqrte.d $vr1, $vr0 +; CHECK-NEXT: vfmul.d $vr1, $vr0, $vr1 +; CHECK-NEXT: vfmul.d $vr2, $vr0, $vr1 +; CHECK-NEXT: vldi $vr3, -888 +; CHECK-NEXT: vfmadd.d $vr2, $vr2, $vr1, $vr3 +; CHECK-NEXT: vldi $vr4, -800 +; CHECK-NEXT: vfmul.d $vr1, $vr1, $vr4 +; CHECK-NEXT: vfmul.d $vr1, $vr1, $vr2 +; CHECK-NEXT: vfmul.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vfmadd.d $vr0, $vr0, $vr1, $vr3 +; CHECK-NEXT: vfmul.d $vr1, $vr1, $vr4 +; CHECK-NEXT: vfmul.d $vr0, $vr1, $vr0 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret ; ; FAULT-LA64-LABEL: one_div_sqrt_v2f64: ; FAULT-LA64: # %bb.0: # %entry @@ -73,28 +68,6 @@ define void @one_div_sqrt_v2f64(ptr %res, ptr %a0) nounwind { ; FAULT-LA64-NEXT: vfrsqrt.d $vr0, $vr0 ; FAULT-LA64-NEXT: vst $vr0, $a0, 0 ; FAULT-LA64-NEXT: ret -; -; LA64-LABEL: one_div_sqrt_v2f64: -; LA64: # %bb.0: # %entry -; LA64-NEXT: vld $vr0, $a1, 0 -; LA64-NEXT: vfrsqrte.d $vr1, $vr0 -; LA64-NEXT: vfmul.d $vr1, $vr0, $vr1 -; LA64-NEXT: vfmul.d $vr2, $vr0, $vr1 -; LA64-NEXT: ori $a1, $zero, 0 -; LA64-NEXT: lu32i.d $a1, -524288 -; LA64-NEXT: lu52i.d $a1, $a1, -1024 -; LA64-NEXT: vreplgr2vr.d $vr3, $a1 -; LA64-NEXT: vfmadd.d $vr2, $vr2, $vr1, $vr3 -; LA64-NEXT: lu52i.d $a1, $zero, -1026 -; 
LA64-NEXT: vreplgr2vr.d $vr4, $a1 -; LA64-NEXT: vfmul.d $vr1, $vr1, $vr4 -; LA64-NEXT: vfmul.d $vr1, $vr1, $vr2 -; LA64-NEXT: vfmul.d $vr0, $vr0, $vr1 -; LA64-NEXT: vfmadd.d $vr0, $vr0, $vr1, $vr3 -; LA64-NEXT: vfmul.d $vr1, $vr1, $vr4 -; LA64-NEXT: vfmul.d $vr0, $vr1, $vr0 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret entry: %v0 = load <2 x double>, ptr %a0, align 16 %sqrt = call fast <2 x double> @llvm.sqrt.v2f64 (<2 x double> %v0) diff --git a/llvm/test/CodeGen/LoongArch/lsx/fsqrt.ll b/llvm/test/CodeGen/LoongArch/lsx/fsqrt.ll index d88e0d1ea7c2d..9664808681bb8 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/fsqrt.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/fsqrt.ll @@ -51,9 +51,8 @@ define void @one_div_sqrt_v2f64(ptr %res, ptr %a0) nounwind { ; LA32-LABEL: one_div_sqrt_v2f64: ; LA32: # %bb.0: # %entry ; LA32-NEXT: vld $vr0, $a1, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: vld $vr1, $a1, %pc_lo12(.LCPI3_0) ; LA32-NEXT: vfsqrt.d $vr0, $vr0 +; LA32-NEXT: vldi $vr1, -912 ; LA32-NEXT: vfdiv.d $vr0, $vr1, $vr0 ; LA32-NEXT: vst $vr0, $a0, 0 ; LA32-NEXT: ret diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll index 2bd0b597d79ac..34f22e1f6bf45 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/adda.ll @@ -7,11 +7,7 @@ define void @vadda_b(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vld $vr1, $a2, 0 -; CHECK-NEXT: vneg.b $vr2, $vr0 -; CHECK-NEXT: vmax.b $vr0, $vr0, $vr2 -; CHECK-NEXT: vneg.b $vr2, $vr1 -; CHECK-NEXT: vmax.b $vr1, $vr1, $vr2 -; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1 +; CHECK-NEXT: vadda.b $vr0, $vr0, $vr1 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -33,11 +29,7 @@ define void @vadda_h(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vld $vr1, $a2, 0 -; 
CHECK-NEXT: vneg.h $vr2, $vr0 -; CHECK-NEXT: vmax.h $vr0, $vr0, $vr2 -; CHECK-NEXT: vneg.h $vr2, $vr1 -; CHECK-NEXT: vmax.h $vr1, $vr1, $vr2 -; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1 +; CHECK-NEXT: vadda.h $vr0, $vr0, $vr1 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -59,11 +51,7 @@ define void @vadda_w(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vld $vr1, $a2, 0 -; CHECK-NEXT: vneg.w $vr2, $vr0 -; CHECK-NEXT: vmax.w $vr0, $vr0, $vr2 -; CHECK-NEXT: vneg.w $vr2, $vr1 -; CHECK-NEXT: vmax.w $vr1, $vr1, $vr2 -; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1 +; CHECK-NEXT: vadda.w $vr0, $vr0, $vr1 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: @@ -85,11 +73,7 @@ define void @vadda_d(ptr %res, ptr %a, ptr %b) nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vld $vr0, $a1, 0 ; CHECK-NEXT: vld $vr1, $a2, 0 -; CHECK-NEXT: vneg.d $vr2, $vr0 -; CHECK-NEXT: vmax.d $vr0, $vr0, $vr2 -; CHECK-NEXT: vneg.d $vr2, $vr1 -; CHECK-NEXT: vmax.d $vr1, $vr1, $vr2 -; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1 +; CHECK-NEXT: vadda.d $vr0, $vr0, $vr1 ; CHECK-NEXT: vst $vr0, $a0, 0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll index 603bd21ab9af9..fb0b9cee67df5 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/fdiv.ll @@ -53,8 +53,7 @@ define void @one_fdiv_v2f64(ptr %res, ptr %a0) nounwind { ; LA32-LABEL: one_fdiv_v2f64: ; LA32: # %bb.0: # %entry ; LA32-NEXT: vld $vr0, $a1, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: vld $vr1, $a1, %pc_lo12(.LCPI3_0) +; LA32-NEXT: vldi $vr1, -912 ; LA32-NEXT: vfdiv.d $vr0, $vr1, $vr0 ; LA32-NEXT: vst $vr0, $a0, 0 ; LA32-NEXT: ret diff --git a/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll new file mode 100644 index 0000000000000..b651f11596c82 --- 
/dev/null +++ b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll @@ -0,0 +1,58 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s + +define <4 x float> @fadd_elt0_v4f32(float %a) nounwind { +; CHECK-LABEL: fadd_elt0_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -1168 +; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <4 x float> poison, float %a, i32 0 + %c = fadd <4 x float> %b, + ret <4 x float> %c +} + +define <2 x double> @fadd_elt0_v2f64(double %a) nounwind { +; CHECK-LABEL: fadd_elt0_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -912 +; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1 +; CHECK-NEXT: ret +entry: + %b = insertelement <2 x double> poison, double %a, i32 0 + %c = fadd <2 x double> %b, + ret <2 x double> %c +} + +define <4 x float> @fsub_splat_v4f32(float %b) nounwind { +; CHECK-LABEL: fsub_splat_v4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vldi $vr1, -1168 +; CHECK-NEXT: fsub.s $fa0, $fa1, $fa0 +; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <4 x float> poison, float 1.0, i32 0 + %insb = insertelement <4 x float> poison, float %b, i32 0 + %va = shufflevector <4 x float> %insa, <4 x float> poison, <4 x i32> zeroinitializer + %vb = shufflevector <4 x float> %insb, <4 x float> poison, <4 x i32> zeroinitializer + %c = fsub <4 x float> %va, %vb + ret <4 x float> %c +} + +define <2 x double> @fsub_splat_v2f64(double %a, double %b) nounwind { +; CHECK-LABEL: fsub_splat_v2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsub.d $fa0, $fa0, $fa1 +; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0 +; CHECK-NEXT: ret +entry: + %insa = insertelement <2 x double> poison, double %a, i32 0 + %insb = insertelement <2 x double> poison, double %b, i32 0 + %va = shufflevector <2 x double> 
%insa, <2 x double> poison, <2 x i32> zeroinitializer + %vb = shufflevector <2 x double> %insb, <2 x double> poison, <2 x i32> zeroinitializer + %c = fsub <2 x double> %va, %vb + ret <2 x double> %c +} diff --git a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll index 5dbff4a402b3d..8f25a6ba62f9f 100644 --- a/llvm/test/CodeGen/LoongArch/lsx/vselect.ll +++ b/llvm/test/CodeGen/LoongArch/lsx/vselect.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA32 -; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s --check-prefixes=CHECK,LA64 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s define void @select_v16i8_imm(ptr %res, ptr %a0) nounwind { ; CHECK-LABEL: select_v16i8_imm: @@ -50,26 +50,14 @@ define void @select_v8i16(ptr %res, ptr %a0, ptr %a1) nounwind { } define void @select_v4i32(ptr %res, ptr %a0, ptr %a1) nounwind { -; LA32-LABEL: select_v4i32: -; LA32: # %bb.0: -; LA32-NEXT: vld $vr0, $a1, 0 -; LA32-NEXT: vld $vr1, $a2, 0 -; LA32-NEXT: pcalau12i $a1, %pc_hi20(.LCPI3_0) -; LA32-NEXT: vld $vr2, $a1, %pc_lo12(.LCPI3_0) -; LA32-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 -; LA32-NEXT: vst $vr0, $a0, 0 -; LA32-NEXT: ret -; -; LA64-LABEL: select_v4i32: -; LA64: # %bb.0: -; LA64-NEXT: vld $vr0, $a1, 0 -; LA64-NEXT: vld $vr1, $a2, 0 -; LA64-NEXT: ori $a1, $zero, 0 -; LA64-NEXT: lu32i.d $a1, -1 -; LA64-NEXT: vreplgr2vr.d $vr2, $a1 -; LA64-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 -; LA64-NEXT: vst $vr0, $a0, 0 -; LA64-NEXT: ret +; CHECK-LABEL: select_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vld $vr0, $a1, 0 +; CHECK-NEXT: vld $vr1, $a2, 0 +; CHECK-NEXT: vldi $vr2, -1552 +; CHECK-NEXT: vbitsel.v $vr0, $vr1, $vr0, $vr2 +; CHECK-NEXT: vst $vr0, $a0, 0 +; CHECK-NEXT: ret %v0 = load <4 x i32>, ptr %a0 
%v1 = load <4 x i32>, ptr %a1 %sel = select <4 x i1> , <4 x i32> %v0, <4 x i32> %v1 diff --git a/llvm/test/CodeGen/LoongArch/merge-offset-option.ll b/llvm/test/CodeGen/LoongArch/merge-offset-option.ll new file mode 100644 index 0000000000000..e5351a6589cf7 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/merge-offset-option.ll @@ -0,0 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc --mtriple=loongarch64 -mattr=+d --relocation-model=static -O1 \ +; RUN: < %s | FileCheck %s --check-prefix=MERGE +; RUN: llc --mtriple=loongarch64 -mattr=+d --relocation-model=static -O1 \ +; RUN: --loongarch-enable-merge-offset=false < %s | FileCheck %s --check-prefix=NO_MERGE + +@g = dso_local global i32 zeroinitializer, align 4 + +define void @foo() nounwind { +; MERGE-LABEL: foo: +; MERGE: # %bb.0: +; MERGE-NEXT: pcalau12i $a0, %pc_hi20(g) +; MERGE-NEXT: ld.w $zero, $a0, %pc_lo12(g) +; MERGE-NEXT: ret +; +; NO_MERGE-LABEL: foo: +; NO_MERGE: # %bb.0: +; NO_MERGE-NEXT: pcalau12i $a0, %pc_hi20(g) +; NO_MERGE-NEXT: addi.d $a0, $a0, %pc_lo12(g) +; NO_MERGE-NEXT: ld.w $zero, $a0, 0 +; NO_MERGE-NEXT: ret + %v = load volatile i32, ptr @g + ret void +} diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float-varargs.ll b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float-varargs.ll new file mode 100644 index 0000000000000..8cbc879310f61 --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float-varargs.ll @@ -0,0 +1,148 @@ +; RUN: llc -mtriple=mips -relocation-model=static -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,O32 %s +; RUN: llc -mtriple=mipsel -relocation-model=static -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,O32 %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,N32,NEW,NEWBE %s +; RUN: llc -mtriple=mips64el -relocation-model=static 
-target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,N32,NEW,NEWLE %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64,N64,NEW,NEWBE %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64,N64,NEW,NEWLE %s + +@floats = global [11 x float] zeroinitializer +@doubles = global [11 x double] zeroinitializer + +define void @double_args(double %a, ...) + nounwind { +entry: + %0 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1 + store volatile double %a, ptr %0 + + %ap = alloca ptr + call void @llvm.va_start(ptr %ap) + %b = va_arg ptr %ap, double + %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 2 + store volatile double %b, ptr %1 + call void @llvm.va_end(ptr %ap) + ret void +} + +; ALL-LABEL: double_args: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) + +; O32 forbids using floating point registers for the non-variable portion. +; N32/N64 allow it. +; O32-DAG: sw $4, 8([[R2]]) +; O32-DAG: sw $5, 12([[R2]]) +; NEW-DAG: sd $4, 8([[R2]]) + +; The varargs portion is dumped to stack +; O32-DAG: sw $6, 16($sp) +; O32-DAG: sw $7, 20($sp) +; NEW-DAG: sd $5, 8($sp) +; NEW-DAG: sd $6, 16($sp) +; NEW-DAG: sd $7, 24($sp) +; NEW-DAG: sd $8, 32($sp) +; NEW-DAG: sd $9, 40($sp) +; NEW-DAG: sd $10, 48($sp) +; NEW-DAG: sd $11, 56($sp) + +; Get the varargs pointer +; O32 has 4 bytes padding, 4 bytes for the varargs pointer, and 8 bytes reserved +; for arguments 1 and 2. +; N32/N64 has 8 bytes for the varargs pointer, and no reserved area. 
+; O32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 16 +; O32-DAG: sw [[VAPTR]], 4($sp) +; N32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 8 +; N32-DAG: sw [[VAPTR]], 4($sp) +; N64-DAG: daddiu [[VAPTR:\$[0-9]+]], $sp, 8 +; N64-DAG: sd [[VAPTR]], 0($sp) + +; Increment the pointer then get the varargs arg +; LLVM will rebind the load to the stack pointer instead of the varargs pointer +; during lowering. This is fine and doesn't change the behaviour. +; O32-DAG: addiu [[VAPTR]], [[VAPTR]], 8 +; N32-DAG: addiu [[VAPTR]], [[VAPTR]], 8 +; N64-DAG: daddiu [[VAPTR]], [[VAPTR]], 8 +; O32-DAG: lw [[R3:\$[0-9]+]], 16($sp) +; O32-DAG: lw [[R4:\$[0-9]+]], 20($sp) +; O32-DAG: sw [[R3]], 16([[R2]]) +; O32-DAG: sw [[R4]], 20([[R2]]) +; NEW-DAG: ld [[R3:\$[0-9]+]], 8($sp) +; NEW-DAG: sd [[R3]], 16([[R2]]) + +define void @float_args(float %a, ...) nounwind { +entry: + %0 = getelementptr [11 x float], ptr @floats, i32 0, i32 1 + store volatile float %a, ptr %0 + + %ap = alloca ptr + call void @llvm.va_start(ptr %ap) + %b = va_arg ptr %ap, float + %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 2 + store volatile float %b, ptr %1 + call void @llvm.va_end(ptr %ap) + ret void +} + +; ALL-LABEL: float_args: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) + +; The first four arguments are the same in O32/N32/N64. +; The non-variable portion should be unaffected. 
+; O32-DAG: mtc1 $4, $f0 +; O32-DAG: swc1 $f0, 4([[R2]]) +; NEW-DAG: swc1 $f12, 4([[R2]]) + +; The varargs portion is dumped to stack +; O32-DAG: sw $5, 12($sp) +; O32-DAG: sw $6, 16($sp) +; O32-DAG: sw $7, 20($sp) +; NEW-DAG: sd $5, 8($sp) +; NEW-DAG: sd $6, 16($sp) +; NEW-DAG: sd $7, 24($sp) +; NEW-DAG: sd $8, 32($sp) +; NEW-DAG: sd $9, 40($sp) +; NEW-DAG: sd $10, 48($sp) +; NEW-DAG: sd $11, 56($sp) + +; Get the varargs pointer +; O32 has 4 bytes padding, 4 bytes for the varargs pointer, and should have 8 +; bytes reserved for arguments 1 and 2 (the first float arg) but as discussed in +; arguments-float.ll, GCC doesn't agree with MD00305 and treats floats as 4 +; bytes so we only have 12 bytes total. +; N32/N64 has 8 bytes for the varargs pointer, and no reserved area. +; O32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 12 +; O32-DAG: sw [[VAPTR]], 4($sp) +; N32-DAG: addiu [[VAPTR:\$[0-9]+]], $sp, 8 +; N32-DAG: sw [[VAPTR]], 4($sp) +; N64-DAG: daddiu [[VAPTR:\$[0-9]+]], $sp, 8 +; N64-DAG: sd [[VAPTR]], 0($sp) + +; Increment the pointer then get the varargs arg +; LLVM will rebind the load to the stack pointer instead of the varargs pointer +; during lowering. This is fine and doesn't change the behaviour. +; Also, in big-endian mode the offset must be increased by 4 to retrieve the +; correct half of the argument slot. 
+; +; O32-DAG: addiu [[VAPTR]], [[VAPTR]], 4 +; N32-DAG: addiu [[VAPTR]], [[VAPTR]], 8 +; N64-DAG: daddiu [[VAPTR]], [[VAPTR]], 8 +; O32-DAG: lwc1 [[FTMP1:\$f[0-9]+]], 12($sp) +; NEWLE-DAG: lwc1 [[FTMP1:\$f[0-9]+]], 8($sp) +; NEWBE-DAG: lwc1 [[FTMP1:\$f[0-9]+]], 12($sp) +; ALL-DAG: swc1 [[FTMP1]], 8([[R2]]) + +declare void @llvm.va_start(ptr) +declare void @llvm.va_copy(ptr, ptr) +declare void @llvm.va_end(ptr) diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float.ll b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float.ll new file mode 100644 index 0000000000000..6b7ad03c8e1c2 --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-float.ll @@ -0,0 +1,224 @@ +; RUN: llc -mtriple=mips -relocation-model=static -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,O32 %s +; RUN: llc -mtriple=mipsel -relocation-model=static -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,O32 %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,NEW %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32,NEW %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64,NEW %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64,NEW %s + +@bytes = global [11 x i8] zeroinitializer +@dwords = global [11 x i64] zeroinitializer +@floats = global [11 x float] zeroinitializer +@doubles = global [11 x double] zeroinitializer + +define void @double_args(double %a, double %b, double %c, double %d, double %e, + double %f, double %g, double %h, double %i) nounwind { +entry: + %0 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1 + store volatile double 
%a, ptr %0 + %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 2 + store volatile double %b, ptr %1 + %2 = getelementptr [11 x double], ptr @doubles, i32 0, i32 3 + store volatile double %c, ptr %2 + %3 = getelementptr [11 x double], ptr @doubles, i32 0, i32 4 + store volatile double %d, ptr %3 + %4 = getelementptr [11 x double], ptr @doubles, i32 0, i32 5 + store volatile double %e, ptr %4 + %5 = getelementptr [11 x double], ptr @doubles, i32 0, i32 6 + store volatile double %f, ptr %5 + %6 = getelementptr [11 x double], ptr @doubles, i32 0, i32 7 + store volatile double %g, ptr %6 + %7 = getelementptr [11 x double], ptr @doubles, i32 0, i32 8 + store volatile double %h, ptr %7 + %8 = getelementptr [11 x double], ptr @doubles, i32 0, i32 9 + store volatile double %i, ptr %8 + ret void +} + +; ALL-LABEL: double_args: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) + +; The first four arguments are the same in O32/N32/N64. +; The first argument is floating point but single-float is enabled so floating +; point registers are not used. 
+; O32-DAG: sw $4, 8([[R2]]) +; O32-DAG: sw $5, 12([[R2]]) +; NEW-DAG: sd $4, 8([[R2]]) + +; O32-DAG: sw $6, 16([[R2]]) +; O32-DAG: sw $7, 20([[R2]]) +; NEW-DAG: sd $5, 16([[R2]]) + +; O32 has run out of argument registers and starts using the stack +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 16($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 20($sp) +; O32-DAG: sw [[R3]], 24([[R2]]) +; O32-DAG: sw [[R4]], 28([[R2]]) +; NEW-DAG: sd $6, 24([[R2]]) + +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 24($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 28($sp) +; O32-DAG: sw [[R3]], 32([[R2]]) +; O32-DAG: sw [[R4]], 36([[R2]]) +; NEW-DAG: sd $7, 32([[R2]]) + +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 32($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 36($sp) +; O32-DAG: sw [[R3]], 40([[R2]]) +; O32-DAG: sw [[R4]], 44([[R2]]) +; NEW-DAG: sd $8, 40([[R2]]) + +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 40($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 44($sp) +; O32-DAG: sw [[R3]], 48([[R2]]) +; O32-DAG: sw [[R4]], 52([[R2]]) +; NEW-DAG: sd $9, 48([[R2]]) + +; O32-DAG: lw [[R3:\$([0-9]+|gp)]], 48($sp) +; O32-DAG: lw [[R4:\$([0-9]+|gp)]], 52($sp) +; O32-DAG: sw [[R3]], 56([[R2]]) +; O32-DAG: sw [[R4]], 60([[R2]]) +; NEW-DAG: sd $10, 56([[R2]]) + +; N32/N64 have run out of registers and starts using the stack too +; O32-DAG: lw [[R3:\$[0-9]+]], 56($sp) +; O32-DAG: lw [[R4:\$[0-9]+]], 60($sp) +; O32-DAG: sw [[R3]], 64([[R2]]) +; O32-DAG: sw [[R4]], 68([[R2]]) +; NEW-DAG: ld [[R3:\$[0-9]+]], 0($sp) +; NEW-DAG: sd $11, 64([[R2]]) + +define void @float_args(float %a, float %b, float %c, float %d, float %e, + float %f, float %g, float %h, float %i) nounwind { +entry: + %0 = getelementptr [11 x float], ptr @floats, i32 0, i32 1 + store volatile float %a, ptr %0 + %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 2 + store volatile float %b, ptr %1 + %2 = getelementptr [11 x float], ptr @floats, i32 0, i32 3 + store volatile float %c, ptr %2 + %3 = getelementptr [11 x float], ptr @floats, i32 0, i32 4 + store volatile float %d, 
ptr %3 + %4 = getelementptr [11 x float], ptr @floats, i32 0, i32 5 + store volatile float %e, ptr %4 + %5 = getelementptr [11 x float], ptr @floats, i32 0, i32 6 + store volatile float %f, ptr %5 + %6 = getelementptr [11 x float], ptr @floats, i32 0, i32 7 + store volatile float %g, ptr %6 + %7 = getelementptr [11 x float], ptr @floats, i32 0, i32 8 + store volatile float %h, ptr %7 + %8 = getelementptr [11 x float], ptr @floats, i32 0, i32 9 + store volatile float %i, ptr %8 + ret void +} + +; ALL-LABEL: float_args: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) +; SYM64-DAG: daddiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) + +; The first argument is floating point so floating point registers are used. +; The first argument is the same for O32/N32/N64 but the second argument differs +; by register +; ALL-DAG: swc1 $f12, 4([[R1]]) +; O32-DAG: swc1 $f14, 8([[R1]]) +; NEW-DAG: swc1 $f13, 8([[R1]]) + +; O32 has run out of argument registers and (in theory) starts using the stack +; I've yet to find a reference in the documentation about this but GCC uses up +; the remaining two argument slots in the GPR's first. We'll do the same for +; compatibility. +; O32-DAG: mtc1 $6, $f0 +; O32-DAG: swc1 $f0, 12([[R1]]) +; NEW-DAG: swc1 $f14, 12([[R1]]) +; O32-DAG: mtc1 $7, $f0 +; O32-DAG: swc1 $f0, 16([[R1]]) +; NEW-DAG: swc1 $f15, 16([[R1]]) + +; O32 is definitely out of registers now and switches to the stack. 
+; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 16($sp) +; O32-DAG: swc1 [[F1]], 20([[R1]]) +; NEW-DAG: swc1 $f16, 20([[R1]]) +; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 20($sp) +; O32-DAG: swc1 [[F1]], 24([[R1]]) +; NEW-DAG: swc1 $f17, 24([[R1]]) +; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 24($sp) +; O32-DAG: swc1 [[F1]], 28([[R1]]) +; NEW-DAG: swc1 $f18, 28([[R1]]) +; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 28($sp) +; O32-DAG: swc1 [[F1]], 32([[R1]]) +; NEW-DAG: swc1 $f19, 32([[R1]]) + +; N32/N64 have run out of registers and start using the stack too +; O32-DAG: lwc1 [[F1:\$f[0-9]+]], 32($sp) +; O32-DAG: swc1 [[F1]], 36([[R1]]) +; NEW-DAG: lwc1 [[F1:\$f[0-9]+]], 0($sp) +; NEW-DAG: swc1 [[F1]], 36([[R1]]) + + +define void @double_arg2(i8 %a, double %b) nounwind { +entry: + %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1 + store volatile i8 %a, ptr %0 + %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1 + store volatile double %b, ptr %1 + ret void +} + +; ALL-LABEL: double_arg2: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes) +; SYM64-DAG: daddiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes) +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(doubles) + +; The first four arguments are the same in O32/N32/N64. +; The first argument isn't floating point so floating point registers are not +; used. +; The second slot is insufficiently aligned for double on O32 so it is skipped. +; Also, double occupies two slots on O32 and only one for N32/N64. 
+; ALL-DAG: sb $4, 1([[R1]]) +; O32-DAG: sw $6, 8([[R2]]) +; O32-DAG: sw $7, 12([[R2]]) +; NEW-DAG: sd $5, 8([[R2]]) + +define void @float_arg2(i8 %a, float %b) nounwind { +entry: + %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1 + store volatile i8 %a, ptr %0 + %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 1 + store volatile float %b, ptr %1 + ret void +} + +; ALL-LABEL: float_arg2: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes) +; SYM64-DAG: daddiu [[R1:\$[0-9]+]], ${{[0-9]+}}, %lo(bytes) +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(floats) + +; The first argument is the same in O32/N32/N64. +; ALL-DAG: sb $4, 1([[R1]]) + +; The first argument isn't floating point so floating point registers are not +; used in O32, but N32/N64 will still use them. +; MD00305 and GCC disagree on this one. MD00305 says that floats are treated +; as 8-byte aligned and occupy two slots on O32. GCC is treating them as 4-byte +; aligned and occupying one slot. We'll use GCC's definition. 
+; O32-DAG: mtc1 $5, $f0 +; O32-DAG: swc1 $f0, 4([[R2]]) +; NEW-DAG: swc1 $f13, 4([[R2]]) diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-fp128.ll b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-fp128.ll new file mode 100644 index 0000000000000..9268e37b02fb5 --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/arguments-hard-single-fp128.ll @@ -0,0 +1,42 @@ +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32 %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32 %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64 %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64 %s + +@ldoubles = global [11 x fp128] zeroinitializer + +define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind { +entry: + %0 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 1 + store volatile fp128 %a, ptr %0 + %1 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 2 + store volatile fp128 %b, ptr %1 + %2 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 3 + store volatile fp128 %c, ptr %2 + %3 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 4 + store volatile fp128 %d, ptr %3 + %4 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 5 + store volatile fp128 %e, ptr %4 + ret void +} + +; ALL-LABEL: ldouble_args: +; We won't test the way the global address is calculated in this test. This is +; just to get the register number for the other checks. +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(ldoubles) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(ldoubles) + +; The first four arguments are the same in N32/N64. 
+; ALL-DAG: sd $5, 24([[R2]]) +; ALL-DAG: sd $4, 16([[R2]]) +; ALL-DAG: sd $7, 40([[R2]]) +; ALL-DAG: sd $6, 32([[R2]]) +; ALL-DAG: sd $9, 56([[R2]]) +; ALL-DAG: sd $8, 48([[R2]]) +; ALL-DAG: sd $11, 72([[R2]]) +; ALL-DAG: sd $10, 64([[R2]]) diff --git a/llvm/test/CodeGen/Mips/cconv/callee-saved-singlefloat.ll b/llvm/test/CodeGen/Mips/cconv/callee-saved-singlefloat.ll new file mode 100644 index 0000000000000..5bf1f2c2d60da --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/callee-saved-singlefloat.ll @@ -0,0 +1,111 @@ +; RUN: llc -mtriple=mips -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,O32 %s +; RUN: llc -mtriple=mipsel -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,O32 %s + +; RUN: llc -mtriple=mips64 -target-abi n32 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,N32 %s +; RUN: llc -mtriple=mips64el -target-abi n32 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,N32 %s +; RUN: llc -mtriple=mips64 -target-abi n32 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,ALL-INV,N32-INV %s +; RUN: llc -mtriple=mips64el -target-abi n32 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,ALL-INV,N32-INV %s + +; RUN: llc -mtriple=mips64 -target-abi n64 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,N64 %s +; RUN: llc -mtriple=mips64el -target-abi n64 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,N64 %s +; RUN: llc -mtriple=mips64 -target-abi n64 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,ALL-INV,N64-INV %s +; RUN: llc -mtriple=mips64el -target-abi n64 -mattr=+single-float < %s | FileCheck --check-prefixes=ALL,ALL-INV,N64-INV %s + +define void @fpu_clobber() nounwind { +entry: + call void asm "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f13},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"() + ret void +} + +; 
ALL-LABEL: fpu_clobber: +; ALL-INV-NOT: swc1 $f0, +; ALL-INV-NOT: swc1 $f1, +; ALL-INV-NOT: swc1 $f2, +; ALL-INV-NOT: swc1 $f3, +; ALL-INV-NOT: swc1 $f4, +; ALL-INV-NOT: swc1 $f5, +; ALL-INV-NOT: swc1 $f6, +; ALL-INV-NOT: swc1 $f7, +; ALL-INV-NOT: swc1 $f8, +; ALL-INV-NOT: swc1 $f9, +; ALL-INV-NOT: swc1 $f10, +; ALL-INV-NOT: swc1 $f11, +; ALL-INV-NOT: swc1 $f12, +; ALL-INV-NOT: swc1 $f13, +; ALL-INV-NOT: swc1 $f14, +; ALL-INV-NOT: swc1 $f15, +; ALL-INV-NOT: swc1 $f16, +; ALL-INV-NOT: swc1 $f17, +; ALL-INV-NOT: swc1 $f18, +; ALL-INV-NOT: swc1 $f19, + +; O32: addiu $sp, $sp, -48 +; O32-DAG: swc1 [[F20:\$f20]], [[OFF20:[0-9]+]]($sp) +; O32-DAG: swc1 [[F21:\$f21]], [[OFF21:[0-9]+]]($sp) +; O32-DAG: swc1 [[F22:\$f22]], [[OFF22:[0-9]+]]($sp) +; O32-DAG: swc1 [[F23:\$f23]], [[OFF23:[0-9]+]]($sp) +; O32-DAG: swc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp) +; O32-DAG: swc1 [[F25:\$f25]], [[OFF25:[0-9]+]]($sp) +; O32-DAG: swc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp) +; O32-DAG: swc1 [[F27:\$f27]], [[OFF27:[0-9]+]]($sp) +; O32-DAG: swc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp) +; O32-DAG: swc1 [[F29:\$f29]], [[OFF29:[0-9]+]]($sp) +; O32-DAG: swc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp) +; O32-DAG: swc1 [[F31:\$f31]], [[OFF31:[0-9]+]]($sp) +; O32-DAG: lwc1 [[F20]], [[OFF20]]($sp) +; O32-DAG: lwc1 [[F21]], [[OFF21]]($sp) +; O32-DAG: lwc1 [[F22]], [[OFF22]]($sp) +; O32-DAG: lwc1 [[F23]], [[OFF23]]($sp) +; O32-DAG: lwc1 [[F24]], [[OFF24]]($sp) +; O32-DAG: lwc1 [[F25]], [[OFF25]]($sp) +; O32-DAG: lwc1 [[F26]], [[OFF26]]($sp) +; O32-DAG: lwc1 [[F27]], [[OFF27]]($sp) +; O32-DAG: lwc1 [[F28]], [[OFF28]]($sp) +; O32-DAG: lwc1 [[F29]], [[OFF29]]($sp) +; O32-DAG: lwc1 [[F30]], [[OFF30]]($sp) +; O32-DAG: lwc1 [[F31]], [[OFF31]]($sp) +; O32: addiu $sp, $sp, 48 + +; N32: addiu $sp, $sp, -32 +; N32-DAG: swc1 [[F20:\$f20]], [[OFF20:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f21, +; N32-DAG: swc1 [[F22:\$f22]], [[OFF22:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f23, +; N32-DAG: swc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp) +; 
N32-INV-NOT: swc1 $f25, +; N32-DAG: swc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f27, +; N32-DAG: swc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f29, +; N32-DAG: swc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp) +; N32-INV-NOT: swc1 $f31, +; N32-DAG: lwc1 [[F20]], [[OFF20]]($sp) +; N32-DAG: lwc1 [[F22]], [[OFF22]]($sp) +; N32-DAG: lwc1 [[F24]], [[OFF24]]($sp) +; N32-DAG: lwc1 [[F26]], [[OFF26]]($sp) +; N32-DAG: lwc1 [[F28]], [[OFF28]]($sp) +; N32-DAG: lwc1 [[F30]], [[OFF30]]($sp) +; N32: addiu $sp, $sp, 32 + +; N64: addiu $sp, $sp, -32 +; N64-INV-NOT: swc1 $f20, +; N64-INV-NOT: swc1 $f21, +; N64-INV-NOT: swc1 $f22, +; N64-INV-NOT: swc1 $f23, +; N64-DAG: swc1 [[F24:\$f24]], [[OFF24:[0-9]+]]($sp) +; N64-DAG: swc1 [[F25:\$f25]], [[OFF25:[0-9]+]]($sp) +; N64-DAG: swc1 [[F26:\$f26]], [[OFF26:[0-9]+]]($sp) +; N64-DAG: swc1 [[F27:\$f27]], [[OFF27:[0-9]+]]($sp) +; N64-DAG: swc1 [[F28:\$f28]], [[OFF28:[0-9]+]]($sp) +; N64-DAG: swc1 [[F29:\$f29]], [[OFF29:[0-9]+]]($sp) +; N64-DAG: swc1 [[F30:\$f30]], [[OFF30:[0-9]+]]($sp) +; N64-DAG: swc1 [[F31:\$f31]], [[OFF31:[0-9]+]]($sp) +; N64-DAG: lwc1 [[F24]], [[OFF24]]($sp) +; N64-DAG: lwc1 [[F25]], [[OFF25]]($sp) +; N64-DAG: lwc1 [[F26]], [[OFF26]]($sp) +; N64-DAG: lwc1 [[F27]], [[OFF27]]($sp) +; N64-DAG: lwc1 [[F28]], [[OFF28]]($sp) +; N64-DAG: lwc1 [[F29]], [[OFF29]]($sp) +; N64-DAG: lwc1 [[F30]], [[OFF30]]($sp) +; N64-DAG: lwc1 [[F31]], [[OFF31]]($sp) +; N64: addiu $sp, $sp, 32 \ No newline at end of file diff --git a/llvm/test/CodeGen/Mips/cconv/return-hard-single-float.ll b/llvm/test/CodeGen/Mips/cconv/return-hard-single-float.ll new file mode 100644 index 0000000000000..1abf08d8200fb --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/return-hard-single-float.ll @@ -0,0 +1,43 @@ +; RUN: llc -mtriple=mips-linux-gnu -relocation-model=static -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,O32 %s +; RUN: llc -mtriple=mipsel-linux-gnu -relocation-model=static -mattr=+single-float < %s \ 
+; RUN: | FileCheck --check-prefixes=ALL,O32 %s + +; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n32 -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,N32 %s +; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n32 -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,N32 %s + +; RUN: llc -mtriple=mips64-linux-gnu -relocation-model=static -target-abi n64 -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,N64 %s +; RUN: llc -mtriple=mips64el-linux-gnu -relocation-model=static -target-abi n64 -mattr=+single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,N64 %s + +@float = global float zeroinitializer +@double = global double zeroinitializer + +define float @retfloat() nounwind { +entry: + %0 = load volatile float, ptr @float + ret float %0 +} + +; ALL-LABEL: retfloat: +; O32-DAG: lui [[R1:\$[0-9]+]], %hi(float) +; O32-DAG: lwc1 $f0, %lo(float)([[R1]]) +; N32-DAG: lui [[R1:\$[0-9]+]], %hi(float) +; N32-DAG: lwc1 $f0, %lo(float)([[R1]]) +; N64-DAG: lwc1 $f0, %lo(float)([[R1:\$[0-9]+]]) + +define double @retdouble() nounwind { +entry: + %0 = load volatile double, ptr @double + ret double %0 +} + +; ALL-LABEL: retdouble: +; O32-DAG: lw $2, %lo(double)([[R1:\$[0-9]+]]) +; O32-DAG: addiu [[R2:\$[0-9]+]], [[R1]], %lo(double) +; O32-DAG: lw $3, 4([[R2]]) +; N32-DAG: ld $2, %lo(double)([[R1:\$[0-9]+]]) +; N64-DAG: ld $2, %lo(double)([[R1:\$[0-9]+]]) diff --git a/llvm/test/CodeGen/Mips/cconv/return-hard-single-fp128.ll b/llvm/test/CodeGen/Mips/cconv/return-hard-single-fp128.ll new file mode 100644 index 0000000000000..e4d04146ecc2f --- /dev/null +++ b/llvm/test/CodeGen/Mips/cconv/return-hard-single-fp128.ll @@ -0,0 +1,24 @@ +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM32 %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n32 -mattr=single-float < %s \ +;
RUN: | FileCheck --check-prefixes=ALL,SYM32 %s + +; RUN: llc -mtriple=mips64 -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64 %s +; RUN: llc -mtriple=mips64el -relocation-model=static -target-abi n64 -mattr=single-float < %s \ +; RUN: | FileCheck --check-prefixes=ALL,SYM64 %s + +@fp128 = global fp128 zeroinitializer + +define fp128 @retldouble() nounwind { +entry: + %0 = load volatile fp128, ptr @fp128 + ret fp128 %0 +} + +; ALL-LABEL: retldouble: +; SYM32-DAG: addiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(fp128) +; SYM64-DAG: daddiu [[R2:\$[0-9]+]], ${{[0-9]+}}, %lo(fp128) + +; ALL-DAG: ld $2, %lo(fp128)([[R2]]) +; ALL-DAG: ld $3, 8([[R2]]) diff --git a/llvm/test/CodeGen/Mips/inlineasm-constraints-singlefloat.ll b/llvm/test/CodeGen/Mips/inlineasm-constraints-singlefloat.ll new file mode 100644 index 0000000000000..ddebddcdab260 --- /dev/null +++ b/llvm/test/CodeGen/Mips/inlineasm-constraints-singlefloat.ll @@ -0,0 +1,68 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 +; RUN: llc -mtriple=mips -mattr=+single-float < %s | FileCheck %s --check-prefix=MIPS32 +; RUN: llc -mtriple=mips64 -mattr=+single-float < %s | FileCheck %s --check-prefix=MIPS64 + +define void @read_double(ptr %0) { +; MIPS32-LABEL: read_double: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lw $2, 4($4) +; MIPS32-NEXT: lw $3, 0($4) +; MIPS32-NEXT: #APP +; MIPS32-NEXT: #NO_APP +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop +; +; MIPS64-LABEL: read_double: +; MIPS64: # %bb.0: +; MIPS64-NEXT: ld $2, 0($4) +; MIPS64-NEXT: #APP +; MIPS64-NEXT: #NO_APP +; MIPS64-NEXT: jr $ra +; MIPS64-NEXT: nop + %2 = load double, ptr %0, align 8 + tail call void asm sideeffect "", "r,~{$1}"(double %2) + ret void +} + +define void @read_float(ptr %0) { +; MIPS32-LABEL: read_float: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lwc1 $f0, 0($4) +; MIPS32-NEXT: #APP +; MIPS32-NEXT: #NO_APP +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: nop +; 
+; MIPS64-LABEL: read_float: +; MIPS64: # %bb.0: +; MIPS64-NEXT: lwc1 $f0, 0($4) +; MIPS64-NEXT: #APP +; MIPS64-NEXT: #NO_APP +; MIPS64-NEXT: jr $ra +; MIPS64-NEXT: nop + %2 = load float, ptr %0, align 8 + tail call void asm sideeffect "", "f"(float %2) + ret void +} + +; Test that a proper register class is assigned to clobbers in single-float mode +define float @explicit_float_register_clobber(ptr %0) { +; MIPS32-LABEL: explicit_float_register_clobber: +; MIPS32: # %bb.0: +; MIPS32-NEXT: lwc1 $f1, 0($4) +; MIPS32-NEXT: #APP +; MIPS32-NEXT: #NO_APP +; MIPS32-NEXT: jr $ra +; MIPS32-NEXT: mov.s $f0, $f1 +; +; MIPS64-LABEL: explicit_float_register_clobber: +; MIPS64: # %bb.0: +; MIPS64-NEXT: lwc1 $f1, 0($4) +; MIPS64-NEXT: #APP +; MIPS64-NEXT: #NO_APP +; MIPS64-NEXT: jr $ra +; MIPS64-NEXT: mov.s $f0, $f1 + %2 = load float, ptr %0, align 8 + tail call void asm sideeffect "", "~{$f0}"() + ret float %2 +} diff --git a/llvm/test/CodeGen/Mips/int-to-float-conversion.ll b/llvm/test/CodeGen/Mips/int-to-float-conversion.ll index 84bc6a253595a..1c8ad9ad07e15 100644 --- a/llvm/test/CodeGen/Mips/int-to-float-conversion.ll +++ b/llvm/test/CodeGen/Mips/int-to-float-conversion.ll @@ -1,13 +1,24 @@ -; RUN: llc -mtriple=mipsel < %s | FileCheck %s -check-prefix=32 -; RUN: llc -mtriple=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefix=64 -; RUN: llc -mtriple=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64 +; RUN: llc -mtriple=mipsel < %s | FileCheck %s -check-prefixes=ALL,32,32DF +; RUN: llc -mtriple=mipsel -mattr=+single-float < %s | FileCheck %s -check-prefixes=ALL,32,32SF + +; RUN: llc -mtriple=mips64el -mcpu=mips4 < %s | FileCheck %s -check-prefixes=ALL,64,64DF +; RUN: llc -mtriple=mips64el -mcpu=mips4 -mattr=+single-float < %s \ +; RUN: | FileCheck %s -check-prefixes=ALL,64,64SF + +; RUN: llc -mtriple=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefixes=ALL,64,64DF +; RUN: llc -mtriple=mips64el -mcpu=mips64 -mattr=+single-float < %s \ +; RUN: | FileCheck %s 
-check-prefixes=ALL,64,64SF + +; Test various combinations of 32/64bit GP registers and single/double floating point support. @i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4 @i3 = common global ptr null, align 4 -; 32-LABEL: test_float_int_: -; 32: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] -; 32: cvt.s.w $f{{[0-9]+}}, $f[[R0]] +; ALL-LABEL: test_float_int_: +; 32: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 32: cvt.s.w $f{{[0-9]+}}, $f[[R0]] +; 64: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 64: cvt.s.w $f{{[0-9]+}}, $f[[R0]] define float @test_float_int_(i32 %a) { entry: @@ -15,12 +26,13 @@ entry: ret float %conv } -; 32-LABEL: test_double_int_: -; 32: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] -; 32: cvt.d.w $f{{[0-9]+}}, $f[[R0]] -; 64-LABEL: test_double_int_: -; 64: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] -; 64: cvt.d.w $f{{[0-9]+}}, $f[[R0]] +; ALL-LABEL: test_double_int_: +; 32DF: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 32DF: cvt.d.w $f{{[0-9]+}}, $f[[R0]] +; 32SF: jal __floatsidf +; 64DF: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 64DF: cvt.d.w $f{{[0-9]+}}, $f[[R0]] +; 64SF: jal __floatsidf define double @test_double_int_(i32 %a) { entry: @@ -28,9 +40,11 @@ entry: ret double %conv } -; 64-LABEL: test_float_LL_: -; 64: dmtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] -; 64: cvt.s.l $f{{[0-9]+}}, $f[[R0]] +; ALL-LABEL: test_float_LL_: +; 32: jal __floatdisf +; 64DF: dmtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 64DF: cvt.s.l $f{{[0-9]+}}, $f[[R0]] +; 64SF: jal __floatdisf define float @test_float_LL_(i64 %a) { entry: @@ -38,9 +52,11 @@ entry: ret float %conv } -; 64-LABEL: test_double_LL_: -; 64: dmtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] -; 64: cvt.d.l $f{{[0-9]+}}, $f[[R0]] +; ALL-LABEL: test_double_LL_: +; 32: jal __floatdidf +; 64DF: dmtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]] +; 64DF: cvt.d.l $f{{[0-9]+}}, $f[[R0]] +; 64SF: jal __floatdidf define double @test_double_LL_(i64 %a) { entry: diff --git a/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll new file mode 100644 index 
0000000000000..18fb87935d17d --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll @@ -0,0 +1,119 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mcpu=sm_90a -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | FileCheck --check-prefixes=CHECK,CHECK-SM90A %s +; RUN: %if ptxas-12.7 %{ \ +; RUN: llc < %s -mcpu=sm_90a -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | %ptxas-verify -arch=sm_90a \ +; RUN: %} +; RUN: llc < %s -mcpu=sm_100 -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | FileCheck --check-prefixes=CHECK,CHECK-SM100 %s +; RUN: %if ptxas-12.7 %{ \ +; RUN: llc < %s -mcpu=sm_100 -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | %ptxas-verify -arch=sm_100 \ +; RUN: %} + +; Test that v2i32 -> v2f32 conversions don't emit bitwise operations on i64. + +target triple = "nvptx64-nvidia-cuda" + +declare <2 x i32> @return_i32x2(i32 %0) + +; Test with v2i32. 
+define ptx_kernel void @store_i32x2(i32 %0, ptr %p) { +; CHECK-SM90A-LABEL: store_i32x2( +; CHECK-SM90A: { +; CHECK-SM90A-NEXT: .reg .b32 %r<6>; +; CHECK-SM90A-NEXT: .reg .b64 %rd<2>; +; CHECK-SM90A-EMPTY: +; CHECK-SM90A-NEXT: // %bb.0: +; CHECK-SM90A-NEXT: ld.param.b64 %rd1, [store_i32x2_param_1]; +; CHECK-SM90A-NEXT: ld.param.b32 %r1, [store_i32x2_param_0]; +; CHECK-SM90A-NEXT: { // callseq 0, 0 +; CHECK-SM90A-NEXT: .param .b32 param0; +; CHECK-SM90A-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-SM90A-NEXT: st.param.b32 [param0], %r1; +; CHECK-SM90A-NEXT: call.uni (retval0), return_i32x2, (param0); +; CHECK-SM90A-NEXT: ld.param.v2.b32 {%r2, %r3}, [retval0]; +; CHECK-SM90A-NEXT: } // callseq 0 +; CHECK-SM90A-NEXT: add.rn.f32 %r4, %r3, %r3; +; CHECK-SM90A-NEXT: add.rn.f32 %r5, %r2, %r2; +; CHECK-SM90A-NEXT: st.v2.b32 [%rd1], {%r5, %r4}; +; CHECK-SM90A-NEXT: ret; +; +; CHECK-SM100-LABEL: store_i32x2( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<2>; +; CHECK-SM100-NEXT: .reg .b64 %rd<4>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b64 %rd1, [store_i32x2_param_1]; +; CHECK-SM100-NEXT: ld.param.b32 %r1, [store_i32x2_param_0]; +; CHECK-SM100-NEXT: { // callseq 0, 0 +; CHECK-SM100-NEXT: .param .b32 param0; +; CHECK-SM100-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-SM100-NEXT: st.param.b32 [param0], %r1; +; CHECK-SM100-NEXT: call.uni (retval0), return_i32x2, (param0); +; CHECK-SM100-NEXT: ld.param.b64 %rd2, [retval0]; +; CHECK-SM100-NEXT: } // callseq 0 +; CHECK-SM100-NEXT: add.rn.f32x2 %rd3, %rd2, %rd2; +; CHECK-SM100-NEXT: st.b64 [%rd1], %rd3; +; CHECK-SM100-NEXT: ret; + %v = call <2 x i32> @return_i32x2(i32 %0) + %v.f32x2 = bitcast <2 x i32> %v to <2 x float> + %res = fadd <2 x float> %v.f32x2, %v.f32x2 + store <2 x float> %res, ptr %p, align 8 + ret void +} + +; Test with inline ASM returning { <1 x float>, <1 x float> }, which decays to +; v2i32. 
+define ptx_kernel void @inlineasm(ptr %p) { +; CHECK-SM90A-LABEL: inlineasm( +; CHECK-SM90A: { +; CHECK-SM90A-NEXT: .reg .b32 %r<7>; +; CHECK-SM90A-NEXT: .reg .b64 %rd<2>; +; CHECK-SM90A-EMPTY: +; CHECK-SM90A-NEXT: // %bb.0: +; CHECK-SM90A-NEXT: ld.param.b64 %rd1, [inlineasm_param_0]; +; CHECK-SM90A-NEXT: mov.b32 %r3, 0; +; CHECK-SM90A-NEXT: mov.b32 %r4, %r3; +; CHECK-SM90A-NEXT: mov.b32 %r2, %r4; +; CHECK-SM90A-NEXT: mov.b32 %r1, %r3; +; CHECK-SM90A-NEXT: // begin inline asm +; CHECK-SM90A-NEXT: // nop +; CHECK-SM90A-NEXT: // end inline asm +; CHECK-SM90A-NEXT: mul.rn.f32 %r5, %r2, 0f00000000; +; CHECK-SM90A-NEXT: mul.rn.f32 %r6, %r1, 0f00000000; +; CHECK-SM90A-NEXT: st.v2.b32 [%rd1], {%r6, %r5}; +; CHECK-SM90A-NEXT: ret; +; +; CHECK-SM100-LABEL: inlineasm( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<6>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.b64 %rd1, [inlineasm_param_0]; +; CHECK-SM100-NEXT: mov.b32 %r3, 0; +; CHECK-SM100-NEXT: mov.b32 %r4, %r3; +; CHECK-SM100-NEXT: mov.b32 %r2, %r4; +; CHECK-SM100-NEXT: mov.b32 %r1, %r3; +; CHECK-SM100-NEXT: // begin inline asm +; CHECK-SM100-NEXT: // nop +; CHECK-SM100-NEXT: // end inline asm +; CHECK-SM100-NEXT: mov.b64 %rd2, {%r1, %r2}; +; CHECK-SM100-NEXT: mov.b32 %r5, 0f00000000; +; CHECK-SM100-NEXT: mov.b64 %rd3, {%r5, %r5}; +; CHECK-SM100-NEXT: mul.rn.f32x2 %rd4, %rd2, %rd3; +; CHECK-SM100-NEXT: st.b64 [%rd1], %rd4; +; CHECK-SM100-NEXT: ret; + %r = call { <1 x float>, <1 x float> } asm sideeffect "// nop", "=f,=f,0,1"(<1 x float> zeroinitializer, <1 x float> zeroinitializer) + %i0 = extractvalue { <1 x float>, <1 x float> } %r, 0 + %i1 = extractvalue { <1 x float>, <1 x float> } %r, 1 + %i4 = shufflevector <1 x float> %i0, <1 x float> %i1, <2 x i32> <i32 0, i32 1> + %mul = fmul <2 x float> %i4, zeroinitializer + store <2 x float> %mul, ptr %p, align 8 + ret void +} +;; NOTE: These prefixes are unused and the list is autogenerated.
Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll index 217bb483682ff..a90cfff51e2c6 100644 --- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll +++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll @@ -1938,16 +1938,29 @@ define <2 x i64> @test_fptoui_2xi64(<2 x float> %a) #0 { } define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 { -; CHECK-LABEL: test_uitofp_2xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_param_0]; -; CHECK-NEXT: cvt.rn.f32.u32 %r3, %r2; -; CHECK-NEXT: cvt.rn.f32.u32 %r4, %r1; -; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; -; CHECK-NEXT: ret; +; CHECK-NOF32X2-LABEL: test_uitofp_2xi32( +; CHECK-NOF32X2: { +; CHECK-NOF32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOF32X2-EMPTY: +; CHECK-NOF32X2-NEXT: // %bb.0: +; CHECK-NOF32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_param_0]; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.u32 %r4, %r1; +; CHECK-NOF32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOF32X2-NEXT: ret; +; +; CHECK-F32X2-LABEL: test_uitofp_2xi32( +; CHECK-F32X2: { +; CHECK-F32X2-NEXT: .reg .b32 %r<5>; +; CHECK-F32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-F32X2-EMPTY: +; CHECK-F32X2-NEXT: // %bb.0: +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_param_0]; +; CHECK-F32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; +; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r4, %r1; +; CHECK-F32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-F32X2-NEXT: ret; %r = uitofp <2 x i32> %a to <2 x float> ret <2 x float> %r } @@ -1969,16 +1982,29 @@ define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 { } define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 { -; CHECK-LABEL: test_sitofp_2xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<5>; -; 
CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_param_0]; -; CHECK-NEXT: cvt.rn.f32.s32 %r3, %r2; -; CHECK-NEXT: cvt.rn.f32.s32 %r4, %r1; -; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; -; CHECK-NEXT: ret; +; CHECK-NOF32X2-LABEL: test_sitofp_2xi32( +; CHECK-NOF32X2: { +; CHECK-NOF32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOF32X2-EMPTY: +; CHECK-NOF32X2-NEXT: // %bb.0: +; CHECK-NOF32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_param_0]; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.s32 %r3, %r2; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.s32 %r4, %r1; +; CHECK-NOF32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOF32X2-NEXT: ret; +; +; CHECK-F32X2-LABEL: test_sitofp_2xi32( +; CHECK-F32X2: { +; CHECK-F32X2-NEXT: .reg .b32 %r<5>; +; CHECK-F32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-F32X2-EMPTY: +; CHECK-F32X2-NEXT: // %bb.0: +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_sitofp_2xi32_param_0]; +; CHECK-F32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-F32X2-NEXT: cvt.rn.f32.s32 %r3, %r2; +; CHECK-F32X2-NEXT: cvt.rn.f32.s32 %r4, %r1; +; CHECK-F32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-F32X2-NEXT: ret; %r = sitofp <2 x i32> %a to <2 x float> ret <2 x float> %r } @@ -2017,16 +2043,17 @@ define <2 x float> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x float> %b) #0 { ; CHECK-F32X2-LABEL: test_uitofp_2xi32_fadd( ; CHECK-F32X2: { ; CHECK-F32X2-NEXT: .reg .b32 %r<5>; -; CHECK-F32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-F32X2-NEXT: .reg .b64 %rd<5>; ; CHECK-F32X2-EMPTY: ; CHECK-F32X2-NEXT: // %bb.0: -; CHECK-F32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0]; -; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_fadd_param_1]; +; CHECK-F32X2-NEXT: ld.param.b64 %rd2, [test_uitofp_2xi32_fadd_param_1]; +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_fadd_param_0]; +; CHECK-F32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; ; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; ; CHECK-F32X2-NEXT: 
cvt.rn.f32.u32 %r4, %r1; -; CHECK-F32X2-NEXT: mov.b64 %rd2, {%r4, %r3}; -; CHECK-F32X2-NEXT: add.rn.f32x2 %rd3, %rd1, %rd2; -; CHECK-F32X2-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-F32X2-NEXT: mov.b64 %rd3, {%r4, %r3}; +; CHECK-F32X2-NEXT: add.rn.f32x2 %rd4, %rd2, %rd3; +; CHECK-F32X2-NEXT: st.param.b64 [func_retval0], %rd4; ; CHECK-F32X2-NEXT: ret; %c = uitofp <2 x i32> %a to <2 x float> %r = fadd <2 x float> %b, %c @@ -2114,14 +2141,23 @@ define <2 x i32> @test_bitcast_2xfloat_to_2xi32(<2 x float> %a) #0 { } define <2 x float> @test_bitcast_2xi32_to_2xfloat(<2 x i32> %a) #0 { -; CHECK-LABEL: test_bitcast_2xi32_to_2xfloat( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0]; -; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r2}; -; CHECK-NEXT: ret; +; CHECK-NOF32X2-LABEL: test_bitcast_2xi32_to_2xfloat( +; CHECK-NOF32X2: { +; CHECK-NOF32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOF32X2-EMPTY: +; CHECK-NOF32X2-NEXT: // %bb.0: +; CHECK-NOF32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0]; +; CHECK-NOF32X2-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r2}; +; CHECK-NOF32X2-NEXT: ret; +; +; CHECK-F32X2-LABEL: test_bitcast_2xi32_to_2xfloat( +; CHECK-F32X2: { +; CHECK-F32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-F32X2-EMPTY: +; CHECK-F32X2-NEXT: // %bb.0: +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_bitcast_2xi32_to_2xfloat_param_0]; +; CHECK-F32X2-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-F32X2-NEXT: ret; %r = bitcast <2 x i32> %a to <2 x float> ret <2 x float> %r } @@ -2851,31 +2887,57 @@ define <2 x float> @test_insertelement(<2 x float> %a, float %x) #0 { } define <2 x float> @test_sitofp_2xi32_to_2xfloat(<2 x i32> %a) #0 { -; CHECK-LABEL: test_sitofp_2xi32_to_2xfloat( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, 
[test_sitofp_2xi32_to_2xfloat_param_0]; -; CHECK-NEXT: cvt.rn.f32.s32 %r3, %r2; -; CHECK-NEXT: cvt.rn.f32.s32 %r4, %r1; -; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; -; CHECK-NEXT: ret; +; CHECK-NOF32X2-LABEL: test_sitofp_2xi32_to_2xfloat( +; CHECK-NOF32X2: { +; CHECK-NOF32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOF32X2-EMPTY: +; CHECK-NOF32X2-NEXT: // %bb.0: +; CHECK-NOF32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_to_2xfloat_param_0]; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.s32 %r3, %r2; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.s32 %r4, %r1; +; CHECK-NOF32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOF32X2-NEXT: ret; +; +; CHECK-F32X2-LABEL: test_sitofp_2xi32_to_2xfloat( +; CHECK-F32X2: { +; CHECK-F32X2-NEXT: .reg .b32 %r<5>; +; CHECK-F32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-F32X2-EMPTY: +; CHECK-F32X2-NEXT: // %bb.0: +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_sitofp_2xi32_to_2xfloat_param_0]; +; CHECK-F32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-F32X2-NEXT: cvt.rn.f32.s32 %r3, %r2; +; CHECK-F32X2-NEXT: cvt.rn.f32.s32 %r4, %r1; +; CHECK-F32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-F32X2-NEXT: ret; %r = sitofp <2 x i32> %a to <2 x float> ret <2 x float> %r } define <2 x float> @test_uitofp_2xi32_to_2xfloat(<2 x i32> %a) #0 { -; CHECK-LABEL: test_uitofp_2xi32_to_2xfloat( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<5>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_to_2xfloat_param_0]; -; CHECK-NEXT: cvt.rn.f32.u32 %r3, %r2; -; CHECK-NEXT: cvt.rn.f32.u32 %r4, %r1; -; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; -; CHECK-NEXT: ret; +; CHECK-NOF32X2-LABEL: test_uitofp_2xi32_to_2xfloat( +; CHECK-NOF32X2: { +; CHECK-NOF32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOF32X2-EMPTY: +; CHECK-NOF32X2-NEXT: // %bb.0: +; CHECK-NOF32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_to_2xfloat_param_0]; +; CHECK-NOF32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; +; CHECK-NOF32X2-NEXT: 
cvt.rn.f32.u32 %r4, %r1; +; CHECK-NOF32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOF32X2-NEXT: ret; +; +; CHECK-F32X2-LABEL: test_uitofp_2xi32_to_2xfloat( +; CHECK-F32X2: { +; CHECK-F32X2-NEXT: .reg .b32 %r<5>; +; CHECK-F32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-F32X2-EMPTY: +; CHECK-F32X2-NEXT: // %bb.0: +; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [test_uitofp_2xi32_to_2xfloat_param_0]; +; CHECK-F32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; +; CHECK-F32X2-NEXT: cvt.rn.f32.u32 %r4, %r1; +; CHECK-F32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-F32X2-NEXT: ret; %r = uitofp <2 x i32> %a to <2 x float> ret <2 x float> %r } diff --git a/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll b/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll index d219493d2b31b..3fac29f74125b 100644 --- a/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll +++ b/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll @@ -346,15 +346,19 @@ define i32 @ld_global_v8i32(ptr addrspace(1) %ptr) { ; SM100-LABEL: ld_global_v8i32( ; SM100: { ; SM100-NEXT: .reg .b32 %r<16>; -; SM100-NEXT: .reg .b64 %rd<2>; +; SM100-NEXT: .reg .b64 %rd<6>; ; SM100-EMPTY: ; SM100-NEXT: // %bb.0: ; SM100-NEXT: ld.param.b64 %rd1, [ld_global_v8i32_param_0]; -; SM100-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1]; -; SM100-NEXT: add.s32 %r9, %r1, %r2; -; SM100-NEXT: add.s32 %r10, %r3, %r4; -; SM100-NEXT: add.s32 %r11, %r5, %r6; -; SM100-NEXT: add.s32 %r12, %r7, %r8; +; SM100-NEXT: ld.global.nc.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1]; +; SM100-NEXT: mov.b64 {%r1, %r2}, %rd5; +; SM100-NEXT: mov.b64 {%r3, %r4}, %rd4; +; SM100-NEXT: mov.b64 {%r5, %r6}, %rd3; +; SM100-NEXT: mov.b64 {%r7, %r8}, %rd2; +; SM100-NEXT: add.s32 %r9, %r7, %r8; +; SM100-NEXT: add.s32 %r10, %r5, %r6; +; SM100-NEXT: add.s32 %r11, %r3, %r4; +; SM100-NEXT: add.s32 %r12, %r1, %r2; ; SM100-NEXT: add.s32 %r13, %r9, %r10; ; SM100-NEXT: add.s32 %r14, %r11, %r12; ; SM100-NEXT: add.s32 %r15, %r13, 
%r14; diff --git a/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll b/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll index 12e3287e73f0f..57852451c0c72 100644 --- a/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll +++ b/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll @@ -82,11 +82,11 @@ define void @avar_bfloat() { define void @avar_i32() { ; PTX-LABEL: avar_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; +; PTX-NEXT: .reg .b64 %rd<5>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: -; PTX-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin]; -; PTX-NEXT: st.global.v8.b32 [globalout], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.nc.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin]; +; PTX-NEXT: st.global.v4.b64 [globalout], {%rd1, %rd2, %rd3, %rd4}; ; PTX-NEXT: ret; %load = load <8 x i32>, ptr addrspace(1) @globalin, !invariant.load !0 store <8 x i32> %load, ptr addrspace(1) @globalout @@ -202,11 +202,11 @@ define void @asi_bfloat() { define void @asi_i32() { ; PTX-LABEL: asi_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; +; PTX-NEXT: .reg .b64 %rd<5>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: -; PTX-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin+32]; -; PTX-NEXT: st.global.v8.b32 [globalout+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.nc.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin+32]; +; PTX-NEXT: st.global.v4.b64 [globalout+32], {%rd1, %rd2, %rd3, %rd4}; ; PTX-NEXT: ret; %in.offset = getelementptr inbounds i8, ptr addrspace(1) @globalin, i32 32 %load = load <8 x i32>, ptr addrspace(1) %in.offset, !invariant.load !0 @@ -331,14 +331,13 @@ define void @areg_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) { define void @areg_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) { ; PTX-LABEL: areg_64_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; -; PTX-NEXT: .reg .b64 %rd<3>; +; PTX-NEXT: .reg .b64 %rd<7>; ; PTX-EMPTY: ; PTX-NEXT: // 
%bb.0: ; PTX-NEXT: ld.param.b64 %rd1, [areg_64_i32_param_0]; -; PTX-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1]; -; PTX-NEXT: ld.param.b64 %rd2, [areg_64_i32_param_1]; -; PTX-NEXT: st.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.nc.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1]; +; PTX-NEXT: ld.param.b64 %rd6, [areg_64_i32_param_1]; +; PTX-NEXT: st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5}; ; PTX-NEXT: ret; %load = load <8 x i32>, ptr addrspace(1) %in, !invariant.load !0 store <8 x i32> %load, ptr addrspace(1) %out @@ -472,14 +471,13 @@ define void @ari_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) { define void @ari_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) { ; PTX-LABEL: ari_64_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; -; PTX-NEXT: .reg .b64 %rd<3>; +; PTX-NEXT: .reg .b64 %rd<7>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: ; PTX-NEXT: ld.param.b64 %rd1, [ari_64_i32_param_0]; ; PTX-NEXT: ld.param.b64 %rd2, [ari_64_i32_param_1]; -; PTX-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1+32]; -; PTX-NEXT: st.global.v8.b32 [%rd2+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.nc.v4.b64 {%rd3, %rd4, %rd5, %rd6}, [%rd1+32]; +; PTX-NEXT: st.global.v4.b64 [%rd2+32], {%rd3, %rd4, %rd5, %rd6}; ; PTX-NEXT: ret; %in.offset = getelementptr inbounds i8, ptr addrspace(1) %in, i32 32 %load = load <8 x i32>, ptr addrspace(1) %in.offset, !invariant.load !0 diff --git a/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll b/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll index b7fa1dd5f2c4d..21604dfbf0013 100644 --- a/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll +++ b/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll @@ -78,11 +78,11 @@ define void @avar_bfloat() { define void @avar_i32() { ; PTX-LABEL: avar_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; +; PTX-NEXT: .reg .b64 %rd<5>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: -; PTX-NEXT: 
ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin]; -; PTX-NEXT: st.global.v8.b32 [globalout], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin]; +; PTX-NEXT: st.global.v4.b64 [globalout], {%rd1, %rd2, %rd3, %rd4}; ; PTX-NEXT: ret; %load = load <8 x i32>, ptr addrspace(1) @globalin store <8 x i32> %load, ptr addrspace(1) @globalout @@ -198,11 +198,11 @@ define void @asi_bfloat() { define void @asi_i32() { ; PTX-LABEL: asi_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; +; PTX-NEXT: .reg .b64 %rd<5>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: -; PTX-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin+32]; -; PTX-NEXT: st.global.v8.b32 [globalout+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin+32]; +; PTX-NEXT: st.global.v4.b64 [globalout+32], {%rd1, %rd2, %rd3, %rd4}; ; PTX-NEXT: ret; %in.offset = getelementptr inbounds i8, ptr addrspace(1) @globalin, i32 32 %load = load <8 x i32>, ptr addrspace(1) %in.offset @@ -327,14 +327,13 @@ define void @areg_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) { define void @areg_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) { ; PTX-LABEL: areg_64_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; -; PTX-NEXT: .reg .b64 %rd<3>; +; PTX-NEXT: .reg .b64 %rd<7>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: ; PTX-NEXT: ld.param.b64 %rd1, [areg_64_i32_param_0]; -; PTX-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1]; -; PTX-NEXT: ld.param.b64 %rd2, [areg_64_i32_param_1]; -; PTX-NEXT: st.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1]; +; PTX-NEXT: ld.param.b64 %rd6, [areg_64_i32_param_1]; +; PTX-NEXT: st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5}; ; PTX-NEXT: ret; %load = load <8 x i32>, ptr addrspace(1) %in store <8 x i32> %load, ptr addrspace(1) %out @@ -468,14 +467,13 @@ define void 
@ari_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) { define void @ari_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) { ; PTX-LABEL: ari_64_i32( ; PTX: { -; PTX-NEXT: .reg .b32 %r<9>; -; PTX-NEXT: .reg .b64 %rd<3>; +; PTX-NEXT: .reg .b64 %rd<7>; ; PTX-EMPTY: ; PTX-NEXT: // %bb.0: ; PTX-NEXT: ld.param.b64 %rd1, [ari_64_i32_param_0]; ; PTX-NEXT: ld.param.b64 %rd2, [ari_64_i32_param_1]; -; PTX-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1+32]; -; PTX-NEXT: st.global.v8.b32 [%rd2+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; PTX-NEXT: ld.global.v4.b64 {%rd3, %rd4, %rd5, %rd6}, [%rd1+32]; +; PTX-NEXT: st.global.v4.b64 [%rd2+32], {%rd3, %rd4, %rd5, %rd6}; ; PTX-NEXT: ret; %in.offset = getelementptr inbounds i8, ptr addrspace(1) %in, i32 32 %load = load <8 x i32>, ptr addrspace(1) %in.offset diff --git a/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll b/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll index e8b43ad28ad27..b5319935f0f9d 100644 --- a/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll +++ b/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll @@ -100,19 +100,32 @@ define void @generic_16xbfloat(ptr %a, ptr %b) { } define void @generic_8xi32(ptr %a, ptr %b) { -; CHECK-LABEL: generic_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [generic_8xi32_param_0]; -; CHECK-NEXT: ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [generic_8xi32_param_1]; -; CHECK-NEXT: st.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: generic_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [generic_8xi32_param_0]; +; SM90-NEXT: ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; 
SM90-NEXT: ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [generic_8xi32_param_1]; +; SM90-NEXT: st.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: generic_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [generic_8xi32_param_0]; +; SM100-NEXT: ld.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: ld.param.b64 %rd6, [generic_8xi32_param_1]; +; SM100-NEXT: st.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load <8 x i32>, ptr %a store <8 x i32> %a.load, ptr %b ret void @@ -265,19 +278,32 @@ define void @generic_volatile_16xbfloat(ptr %a, ptr %b) { } define void @generic_volatile_8xi32(ptr %a, ptr %b) { -; CHECK-LABEL: generic_volatile_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [generic_volatile_8xi32_param_0]; -; CHECK-NEXT: ld.volatile.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.volatile.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [generic_volatile_8xi32_param_1]; -; CHECK-NEXT: st.volatile.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.volatile.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: generic_volatile_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [generic_volatile_8xi32_param_0]; +; SM90-NEXT: ld.volatile.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.volatile.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [generic_volatile_8xi32_param_1]; +; SM90-NEXT: st.volatile.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.volatile.v4.b32 
[%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: generic_volatile_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [generic_volatile_8xi32_param_0]; +; SM100-NEXT: ld.volatile.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.volatile.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: ld.param.b64 %rd6, [generic_volatile_8xi32_param_1]; +; SM100-NEXT: st.volatile.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.volatile.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load volatile <8 x i32>, ptr %a store volatile <8 x i32> %a.load, ptr %b ret void @@ -496,14 +522,13 @@ define void @global_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) { ; ; SM100-LABEL: global_8xi32( ; SM100: { -; SM100-NEXT: .reg .b32 %r<9>; -; SM100-NEXT: .reg .b64 %rd<3>; +; SM100-NEXT: .reg .b64 %rd<7>; ; SM100-EMPTY: ; SM100-NEXT: // %bb.0: ; SM100-NEXT: ld.param.b64 %rd1, [global_8xi32_param_0]; -; SM100-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1]; -; SM100-NEXT: ld.param.b64 %rd2, [global_8xi32_param_1]; -; SM100-NEXT: st.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; SM100-NEXT: ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1]; +; SM100-NEXT: ld.param.b64 %rd6, [global_8xi32_param_1]; +; SM100-NEXT: st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5}; ; SM100-NEXT: ret; %a.load = load <8 x i32>, ptr addrspace(1) %a store <8 x i32> %a.load, ptr addrspace(1) %b @@ -741,14 +766,13 @@ define void @global_volatile_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) { ; ; SM100-LABEL: global_volatile_8xi32( ; SM100: { -; SM100-NEXT: .reg .b32 %r<9>; -; SM100-NEXT: .reg .b64 %rd<3>; +; SM100-NEXT: .reg .b64 %rd<7>; ; SM100-EMPTY: ; SM100-NEXT: // %bb.0: ; SM100-NEXT: ld.param.b64 %rd1, [global_volatile_8xi32_param_0]; -; SM100-NEXT: ld.volatile.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1]; -; SM100-NEXT: ld.param.b64 %rd2, 
[global_volatile_8xi32_param_1]; -; SM100-NEXT: st.volatile.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}; +; SM100-NEXT: ld.volatile.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1]; +; SM100-NEXT: ld.param.b64 %rd6, [global_volatile_8xi32_param_1]; +; SM100-NEXT: st.volatile.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5}; ; SM100-NEXT: ret; %a.load = load volatile <8 x i32>, ptr addrspace(1) %a store volatile <8 x i32> %a.load, ptr addrspace(1) %b @@ -924,19 +948,32 @@ define void @shared_16xbfloat(ptr addrspace(3) %a, ptr addrspace(3) %b) { } define void @shared_8xi32(ptr addrspace(3) %a, ptr addrspace(3) %b) { -; CHECK-LABEL: shared_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [shared_8xi32_param_0]; -; CHECK-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [shared_8xi32_param_1]; -; CHECK-NEXT: st.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: shared_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [shared_8xi32_param_0]; +; SM90-NEXT: ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [shared_8xi32_param_1]; +; SM90-NEXT: st.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: shared_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [shared_8xi32_param_0]; +; SM100-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.shared.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; 
SM100-NEXT: ld.param.b64 %rd6, [shared_8xi32_param_1]; +; SM100-NEXT: st.shared.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.shared.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load <8 x i32>, ptr addrspace(3) %a store <8 x i32> %a.load, ptr addrspace(3) %b ret void @@ -1089,19 +1126,32 @@ define void @shared_volatile_16xbfloat(ptr addrspace(3) %a, ptr addrspace(3) %b) } define void @shared_volatile_8xi32(ptr addrspace(3) %a, ptr addrspace(3) %b) { -; CHECK-LABEL: shared_volatile_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [shared_volatile_8xi32_param_0]; -; CHECK-NEXT: ld.volatile.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.volatile.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [shared_volatile_8xi32_param_1]; -; CHECK-NEXT: st.volatile.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.volatile.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: shared_volatile_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [shared_volatile_8xi32_param_0]; +; SM90-NEXT: ld.volatile.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.volatile.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [shared_volatile_8xi32_param_1]; +; SM90-NEXT: st.volatile.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.volatile.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: shared_volatile_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [shared_volatile_8xi32_param_0]; +; SM100-NEXT: ld.volatile.shared.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.volatile.shared.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: 
ld.param.b64 %rd6, [shared_volatile_8xi32_param_1]; +; SM100-NEXT: st.volatile.shared.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.volatile.shared.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load volatile <8 x i32>, ptr addrspace(3) %a store volatile <8 x i32> %a.load, ptr addrspace(3) %b ret void @@ -1256,19 +1306,32 @@ define void @local_16xbfloat(ptr addrspace(5) %a, ptr addrspace(5) %b) { } define void @local_8xi32(ptr addrspace(5) %a, ptr addrspace(5) %b) { -; CHECK-LABEL: local_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [local_8xi32_param_0]; -; CHECK-NEXT: ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [local_8xi32_param_1]; -; CHECK-NEXT: st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: local_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [local_8xi32_param_0]; +; SM90-NEXT: ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [local_8xi32_param_1]; +; SM90-NEXT: st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: local_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [local_8xi32_param_0]; +; SM100-NEXT: ld.local.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.local.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: ld.param.b64 %rd6, [local_8xi32_param_1]; +; SM100-NEXT: st.local.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.local.v2.b64 [%rd6], {%rd2, %rd3}; +; 
SM100-NEXT: ret; %a.load = load <8 x i32>, ptr addrspace(5) %a store <8 x i32> %a.load, ptr addrspace(5) %b ret void @@ -1421,19 +1484,32 @@ define void @local_volatile_16xbfloat(ptr addrspace(5) %a, ptr addrspace(5) %b) } define void @local_volatile_8xi32(ptr addrspace(5) %a, ptr addrspace(5) %b) { -; CHECK-LABEL: local_volatile_8xi32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<9>; -; CHECK-NEXT: .reg .b64 %rd<3>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.b64 %rd1, [local_volatile_8xi32_param_0]; -; CHECK-NEXT: ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; -; CHECK-NEXT: ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; -; CHECK-NEXT: ld.param.b64 %rd2, [local_volatile_8xi32_param_1]; -; CHECK-NEXT: st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; -; CHECK-NEXT: st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; -; CHECK-NEXT: ret; +; SM90-LABEL: local_volatile_8xi32( +; SM90: { +; SM90-NEXT: .reg .b32 %r<9>; +; SM90-NEXT: .reg .b64 %rd<3>; +; SM90-EMPTY: +; SM90-NEXT: // %bb.0: +; SM90-NEXT: ld.param.b64 %rd1, [local_volatile_8xi32_param_0]; +; SM90-NEXT: ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; SM90-NEXT: ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16]; +; SM90-NEXT: ld.param.b64 %rd2, [local_volatile_8xi32_param_1]; +; SM90-NEXT: st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8}; +; SM90-NEXT: st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; SM90-NEXT: ret; +; +; SM100-LABEL: local_volatile_8xi32( +; SM100: { +; SM100-NEXT: .reg .b64 %rd<7>; +; SM100-EMPTY: +; SM100-NEXT: // %bb.0: +; SM100-NEXT: ld.param.b64 %rd1, [local_volatile_8xi32_param_0]; +; SM100-NEXT: ld.local.v2.b64 {%rd2, %rd3}, [%rd1]; +; SM100-NEXT: ld.local.v2.b64 {%rd4, %rd5}, [%rd1+16]; +; SM100-NEXT: ld.param.b64 %rd6, [local_volatile_8xi32_param_1]; +; SM100-NEXT: st.local.v2.b64 [%rd6+16], {%rd4, %rd5}; +; SM100-NEXT: st.local.v2.b64 [%rd6], {%rd2, %rd3}; +; SM100-NEXT: ret; %a.load = load volatile <8 x i32>, ptr addrspace(5) %a store volatile <8 x i32> %a.load, ptr 
addrspace(5) %b ret void diff --git a/llvm/test/CodeGen/NVPTX/machinelicm-no-preheader.mir b/llvm/test/CodeGen/NVPTX/machinelicm-no-preheader.mir new file mode 100644 index 0000000000000..0b2d85600a2ef --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/machinelicm-no-preheader.mir @@ -0,0 +1,80 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6 +# RUN: llc -mtriple=nvptx64 -mcpu=sm_20 -run-pass=early-machinelicm %s -o - | FileCheck %s + +# This test checks that the early-machineLICM pass successfully creates a new +# loop preheader by splitting the critical edge and hoisting the loop invariant +# value `%8` to the preheader. +# Since the critical edge successor is a loop header, the splitting does not +# break the structured CFG, which is a requirement for the NVPTX target. + +--- +name: test_hoist +tracksRegLiveness: true +registers: + - { id: 0, class: b64, preferred-register: '', flags: [ ] } + - { id: 1, class: b32, preferred-register: '', flags: [ ] } + - { id: 2, class: b32, preferred-register: '', flags: [ ] } + - { id: 3, class: b32, preferred-register: '', flags: [ ] } + - { id: 4, class: b32, preferred-register: '', flags: [ ] } + - { id: 5, class: b32, preferred-register: '', flags: [ ] } + - { id: 6, class: b64, preferred-register: '', flags: [ ] } + - { id: 7, class: b1, preferred-register: '', flags: [ ] } + - { id: 8, class: b32, preferred-register: '', flags: [ ] } + - { id: 9, class: b1, preferred-register: '', flags: [ ] } +body: | + ; CHECK-LABEL: name: test_hoist + ; CHECK: bb.0.entry: + ; CHECK-NEXT: successors: %bb.2(0x30000000), %bb.3(0x50000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[LD_i32_:%[0-9]+]]:b32 = LD_i32 0, 0, 101, 3, 32, &test_hoist_param_1, 0 :: (dereferenceable invariant load (s32), addrspace 101) + ; CHECK-NEXT: [[LD_i64_:%[0-9]+]]:b64 = LD_i64 0, 0, 101, 3, 64, &test_hoist_param_0, 0 :: (dereferenceable invariant load (s64), addrspace 101) + ; CHECK-NEXT: [[ADD64ri:%[0-9]+]]:b64 
= nuw ADD64ri killed [[LD_i64_]], 2 + ; CHECK-NEXT: [[LD_i32_1:%[0-9]+]]:b32 = LD_i32 0, 0, 1, 3, 32, [[ADD64ri]], 0 + ; CHECK-NEXT: [[SETP_i32ri:%[0-9]+]]:b1 = SETP_i32ri [[LD_i32_]], 0, 0 + ; CHECK-NEXT: CBranch killed [[SETP_i32ri]], %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.3: + ; CHECK-NEXT: successors: %bb.1(0x80000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[ADD32ri:%[0-9]+]]:b32 = ADD32ri [[LD_i32_]], -1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.1: + ; CHECK-NEXT: successors: %bb.2(0x04000000), %bb.1(0x7c000000) + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[PHI:%[0-9]+]]:b32 = PHI [[LD_i32_1]], %bb.3, %3, %bb.1 + ; CHECK-NEXT: [[SREM32rr:%[0-9]+]]:b32 = SREM32rr [[PHI]], [[ADD32ri]] + ; CHECK-NEXT: [[SETP_i32ri1:%[0-9]+]]:b1 = SETP_i32ri [[SREM32rr]], 0, 1 + ; CHECK-NEXT: CBranch killed [[SETP_i32ri1]], %bb.1 + ; CHECK-NEXT: GOTO %bb.2 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: bb.2: + ; CHECK-NEXT: [[PHI1:%[0-9]+]]:b32 = PHI [[LD_i32_1]], %bb.0, [[SREM32rr]], %bb.1 + ; CHECK-NEXT: ST_i32 [[PHI1]], 0, 0, 1, 32, [[ADD64ri]], 0 + ; CHECK-NEXT: Return + bb.0.entry: + successors: %bb.2(0x30000000), %bb.1(0x50000000) + + %5:b32 = LD_i32 0, 0, 101, 3, 32, &test_hoist_param_1, 0 :: (dereferenceable invariant load (s32), addrspace 101) + %6:b64 = LD_i64 0, 0, 101, 3, 64, &test_hoist_param_0, 0 :: (dereferenceable invariant load (s64), addrspace 101) + %0:b64 = nuw ADD64ri killed %6, 2 + %1:b32 = LD_i32 0, 0, 1, 3, 32, %0, 0 + %7:b1 = SETP_i32ri %5, 0, 0 + CBranch killed %7, %bb.2 + GOTO %bb.1 + + + bb.1: + successors: %bb.2(0x04000000), %bb.1(0x7c000000) + + %2:b32 = PHI %1, %bb.0, %3, %bb.1 + %8:b32 = ADD32ri %5, -1 + %3:b32 = SREM32rr %2, %8 + %9:b1 = SETP_i32ri %3, 0, 1 + CBranch killed %9, %bb.1 + GOTO %bb.2 + + bb.2: + %4:b32 = PHI %1, %bb.0, %3, %bb.1 + ST_i32 %4, 0, 0, 1, 32, %0, 0 + Return +... 
diff --git a/llvm/test/CodeGen/NVPTX/math-intrins.ll b/llvm/test/CodeGen/NVPTX/math-intrins.ll index 5a55fa97033b7..625c93c3f0a53 100644 --- a/llvm/test/CodeGen/NVPTX/math-intrins.ll +++ b/llvm/test/CodeGen/NVPTX/math-intrins.ll @@ -1586,54 +1586,25 @@ define double @minimumnum_double(double %a, double %b) { ret double %x } -; TODO Improve the "Expand" path for minimumnum vectors on targets where -; f16 is not supported. Ideally it should use two f32 minimumnums first instead of -; fully expanding the minimumnum instruction into compare/select instructions. define <2 x half> @minimumnum_v2half(<2 x half> %a, <2 x half> %b) { ; CHECK-NOF16-LABEL: minimumnum_v2half( ; CHECK-NOF16: { -; CHECK-NOF16-NEXT: .reg .pred %p<13>; -; CHECK-NOF16-NEXT: .reg .b16 %rs<17>; -; CHECK-NOF16-NEXT: .reg .b32 %r<11>; +; CHECK-NOF16-NEXT: .reg .b16 %rs<7>; +; CHECK-NOF16-NEXT: .reg .b32 %r<8>; ; CHECK-NOF16-EMPTY: ; CHECK-NOF16-NEXT: // %bb.0: ; CHECK-NOF16-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [minimumnum_v2half_param_0]; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r1, %rs2; -; CHECK-NOF16-NEXT: setp.nan.f32 %p1, %r1, %r1; ; CHECK-NOF16-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [minimumnum_v2half_param_1]; -; CHECK-NOF16-NEXT: selp.b16 %rs5, %rs4, %rs2, %p1; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r2, %rs5; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r3, %rs4; -; CHECK-NOF16-NEXT: setp.nan.f32 %p2, %r3, %r3; -; CHECK-NOF16-NEXT: selp.b16 %rs6, %rs5, %rs4, %p2; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r4, %rs6; -; CHECK-NOF16-NEXT: setp.lt.f32 %p3, %r2, %r4; -; CHECK-NOF16-NEXT: selp.b16 %rs7, %rs5, %rs6, %p3; -; CHECK-NOF16-NEXT: setp.eq.b16 %p4, %rs5, -32768; -; CHECK-NOF16-NEXT: selp.b16 %rs8, %rs5, %rs7, %p4; -; CHECK-NOF16-NEXT: setp.eq.b16 %p5, %rs6, -32768; -; CHECK-NOF16-NEXT: selp.b16 %rs9, %rs6, %rs8, %p5; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r5, %rs7; -; CHECK-NOF16-NEXT: setp.eq.f32 %p6, %r5, 0f00000000; -; CHECK-NOF16-NEXT: selp.b16 %rs10, %rs9, %rs7, %p6; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r6, %rs1; -; 
CHECK-NOF16-NEXT: setp.nan.f32 %p7, %r6, %r6; -; CHECK-NOF16-NEXT: selp.b16 %rs11, %rs3, %rs1, %p7; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r7, %rs11; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r8, %rs3; -; CHECK-NOF16-NEXT: setp.nan.f32 %p8, %r8, %r8; -; CHECK-NOF16-NEXT: selp.b16 %rs12, %rs11, %rs3, %p8; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r9, %rs12; -; CHECK-NOF16-NEXT: setp.lt.f32 %p9, %r7, %r9; -; CHECK-NOF16-NEXT: selp.b16 %rs13, %rs11, %rs12, %p9; -; CHECK-NOF16-NEXT: setp.eq.b16 %p10, %rs11, -32768; -; CHECK-NOF16-NEXT: selp.b16 %rs14, %rs11, %rs13, %p10; -; CHECK-NOF16-NEXT: setp.eq.b16 %p11, %rs12, -32768; -; CHECK-NOF16-NEXT: selp.b16 %rs15, %rs12, %rs14, %p11; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r10, %rs13; -; CHECK-NOF16-NEXT: setp.eq.f32 %p12, %r10, 0f00000000; -; CHECK-NOF16-NEXT: selp.b16 %rs16, %rs15, %rs13, %p12; -; CHECK-NOF16-NEXT: st.param.v2.b16 [func_retval0], {%rs16, %rs10}; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r1, %rs4; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r2, %rs2; +; CHECK-NOF16-NEXT: min.f32 %r3, %r2, %r1; +; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs5, %r3; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r4, %rs3; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r5, %rs1; +; CHECK-NOF16-NEXT: min.f32 %r6, %r5, %r4; +; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %r6; +; CHECK-NOF16-NEXT: mov.b32 %r7, {%rs6, %rs5}; +; CHECK-NOF16-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: minimumnum_v2half( @@ -1649,48 +1620,22 @@ define <2 x half> @minimumnum_v2half(<2 x half> %a, <2 x half> %b) { ; ; CHECK-SM80-NOF16-LABEL: minimumnum_v2half( ; CHECK-SM80-NOF16: { -; CHECK-SM80-NOF16-NEXT: .reg .pred %p<13>; -; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<17>; -; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<11>; +; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<7>; +; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<8>; ; CHECK-SM80-NOF16-EMPTY: ; CHECK-SM80-NOF16-NEXT: // %bb.0: ; CHECK-SM80-NOF16-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [minimumnum_v2half_param_0]; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r1, %rs2; -; 
CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p1, %r1, %r1; ; CHECK-SM80-NOF16-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [minimumnum_v2half_param_1]; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs5, %rs4, %rs2, %p1; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r2, %rs5; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r3, %rs4; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p2, %r3, %r3; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs6, %rs5, %rs4, %p2; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r4, %rs6; -; CHECK-SM80-NOF16-NEXT: setp.lt.f32 %p3, %r2, %r4; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs7, %rs5, %rs6, %p3; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p4, %rs5, -32768; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs8, %rs5, %rs7, %p4; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p5, %rs6, -32768; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs9, %rs6, %rs8, %p5; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r5, %rs7; -; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p6, %r5, 0f00000000; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs10, %rs9, %rs7, %p6; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r6, %rs1; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p7, %r6, %r6; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs11, %rs3, %rs1, %p7; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r7, %rs11; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r8, %rs3; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p8, %r8, %r8; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs12, %rs11, %rs3, %p8; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r9, %rs12; -; CHECK-SM80-NOF16-NEXT: setp.lt.f32 %p9, %r7, %r9; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs13, %rs11, %rs12, %p9; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p10, %rs11, -32768; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs14, %rs11, %rs13, %p10; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p11, %rs12, -32768; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs15, %rs12, %rs14, %p11; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r10, %rs13; -; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p12, %r10, 0f00000000; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs16, %rs15, %rs13, %p12; -; CHECK-SM80-NOF16-NEXT: st.param.v2.b16 [func_retval0], {%rs16, %rs10}; +; 
CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r1, %rs4; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r2, %rs2; +; CHECK-SM80-NOF16-NEXT: min.f32 %r3, %r2, %r1; +; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs5, %r3; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r4, %rs3; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r5, %rs1; +; CHECK-SM80-NOF16-NEXT: min.f32 %r6, %r5, %r4; +; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %r6; +; CHECK-SM80-NOF16-NEXT: mov.b32 %r7, {%rs6, %rs5}; +; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-SM80-NOF16-NEXT: ret; %x = call <2 x half> @llvm.minimumnum.v2f16(<2 x half> %a, <2 x half> %b) ret <2 x half> %x @@ -1788,54 +1733,25 @@ define double @maximumnum_double(double %a, double %b) { ret double %x } -; TODO Improve the "Expand" path for maximumnum vectors on targets where -; f16 is not supported. Ideally it should use two f32 maximumnums first instead of -; fully expanding the maximumnum instruction into compare/select instructions. define <2 x half> @maximumnum_v2half(<2 x half> %a, <2 x half> %b) { ; CHECK-NOF16-LABEL: maximumnum_v2half( ; CHECK-NOF16: { -; CHECK-NOF16-NEXT: .reg .pred %p<13>; -; CHECK-NOF16-NEXT: .reg .b16 %rs<17>; -; CHECK-NOF16-NEXT: .reg .b32 %r<11>; +; CHECK-NOF16-NEXT: .reg .b16 %rs<7>; +; CHECK-NOF16-NEXT: .reg .b32 %r<8>; ; CHECK-NOF16-EMPTY: ; CHECK-NOF16-NEXT: // %bb.0: ; CHECK-NOF16-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [maximumnum_v2half_param_0]; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r1, %rs2; -; CHECK-NOF16-NEXT: setp.nan.f32 %p1, %r1, %r1; ; CHECK-NOF16-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [maximumnum_v2half_param_1]; -; CHECK-NOF16-NEXT: selp.b16 %rs5, %rs4, %rs2, %p1; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r2, %rs5; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r3, %rs4; -; CHECK-NOF16-NEXT: setp.nan.f32 %p2, %r3, %r3; -; CHECK-NOF16-NEXT: selp.b16 %rs6, %rs5, %rs4, %p2; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r4, %rs6; -; CHECK-NOF16-NEXT: setp.gt.f32 %p3, %r2, %r4; -; CHECK-NOF16-NEXT: selp.b16 %rs7, %rs5, %rs6, %p3; -; CHECK-NOF16-NEXT: 
setp.eq.b16 %p4, %rs5, 0; -; CHECK-NOF16-NEXT: selp.b16 %rs8, %rs5, %rs7, %p4; -; CHECK-NOF16-NEXT: setp.eq.b16 %p5, %rs6, 0; -; CHECK-NOF16-NEXT: selp.b16 %rs9, %rs6, %rs8, %p5; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r5, %rs7; -; CHECK-NOF16-NEXT: setp.eq.f32 %p6, %r5, 0f00000000; -; CHECK-NOF16-NEXT: selp.b16 %rs10, %rs9, %rs7, %p6; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r6, %rs1; -; CHECK-NOF16-NEXT: setp.nan.f32 %p7, %r6, %r6; -; CHECK-NOF16-NEXT: selp.b16 %rs11, %rs3, %rs1, %p7; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r7, %rs11; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r8, %rs3; -; CHECK-NOF16-NEXT: setp.nan.f32 %p8, %r8, %r8; -; CHECK-NOF16-NEXT: selp.b16 %rs12, %rs11, %rs3, %p8; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r9, %rs12; -; CHECK-NOF16-NEXT: setp.gt.f32 %p9, %r7, %r9; -; CHECK-NOF16-NEXT: selp.b16 %rs13, %rs11, %rs12, %p9; -; CHECK-NOF16-NEXT: setp.eq.b16 %p10, %rs11, 0; -; CHECK-NOF16-NEXT: selp.b16 %rs14, %rs11, %rs13, %p10; -; CHECK-NOF16-NEXT: setp.eq.b16 %p11, %rs12, 0; -; CHECK-NOF16-NEXT: selp.b16 %rs15, %rs12, %rs14, %p11; -; CHECK-NOF16-NEXT: cvt.f32.f16 %r10, %rs13; -; CHECK-NOF16-NEXT: setp.eq.f32 %p12, %r10, 0f00000000; -; CHECK-NOF16-NEXT: selp.b16 %rs16, %rs15, %rs13, %p12; -; CHECK-NOF16-NEXT: st.param.v2.b16 [func_retval0], {%rs16, %rs10}; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r1, %rs4; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r2, %rs2; +; CHECK-NOF16-NEXT: max.f32 %r3, %r2, %r1; +; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs5, %r3; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r4, %rs3; +; CHECK-NOF16-NEXT: cvt.f32.f16 %r5, %rs1; +; CHECK-NOF16-NEXT: max.f32 %r6, %r5, %r4; +; CHECK-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %r6; +; CHECK-NOF16-NEXT: mov.b32 %r7, {%rs6, %rs5}; +; CHECK-NOF16-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-NOF16-NEXT: ret; ; ; CHECK-F16-LABEL: maximumnum_v2half( @@ -1851,48 +1767,22 @@ define <2 x half> @maximumnum_v2half(<2 x half> %a, <2 x half> %b) { ; ; CHECK-SM80-NOF16-LABEL: maximumnum_v2half( ; CHECK-SM80-NOF16: { -; CHECK-SM80-NOF16-NEXT: .reg .pred %p<13>; -; 
CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<17>; -; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<11>; +; CHECK-SM80-NOF16-NEXT: .reg .b16 %rs<7>; +; CHECK-SM80-NOF16-NEXT: .reg .b32 %r<8>; ; CHECK-SM80-NOF16-EMPTY: ; CHECK-SM80-NOF16-NEXT: // %bb.0: ; CHECK-SM80-NOF16-NEXT: ld.param.v2.b16 {%rs1, %rs2}, [maximumnum_v2half_param_0]; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r1, %rs2; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p1, %r1, %r1; ; CHECK-SM80-NOF16-NEXT: ld.param.v2.b16 {%rs3, %rs4}, [maximumnum_v2half_param_1]; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs5, %rs4, %rs2, %p1; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r2, %rs5; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r3, %rs4; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p2, %r3, %r3; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs6, %rs5, %rs4, %p2; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r4, %rs6; -; CHECK-SM80-NOF16-NEXT: setp.gt.f32 %p3, %r2, %r4; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs7, %rs5, %rs6, %p3; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p4, %rs5, 0; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs8, %rs5, %rs7, %p4; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p5, %rs6, 0; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs9, %rs6, %rs8, %p5; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r5, %rs7; -; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p6, %r5, 0f00000000; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs10, %rs9, %rs7, %p6; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r6, %rs1; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p7, %r6, %r6; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs11, %rs3, %rs1, %p7; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r7, %rs11; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r8, %rs3; -; CHECK-SM80-NOF16-NEXT: setp.nan.f32 %p8, %r8, %r8; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs12, %rs11, %rs3, %p8; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r9, %rs12; -; CHECK-SM80-NOF16-NEXT: setp.gt.f32 %p9, %r7, %r9; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs13, %rs11, %rs12, %p9; -; CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p10, %rs11, 0; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs14, %rs11, %rs13, %p10; -; 
CHECK-SM80-NOF16-NEXT: setp.eq.b16 %p11, %rs12, 0; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs15, %rs12, %rs14, %p11; -; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r10, %rs13; -; CHECK-SM80-NOF16-NEXT: setp.eq.f32 %p12, %r10, 0f00000000; -; CHECK-SM80-NOF16-NEXT: selp.b16 %rs16, %rs15, %rs13, %p12; -; CHECK-SM80-NOF16-NEXT: st.param.v2.b16 [func_retval0], {%rs16, %rs10}; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r1, %rs4; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r2, %rs2; +; CHECK-SM80-NOF16-NEXT: max.f32 %r3, %r2, %r1; +; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs5, %r3; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r4, %rs3; +; CHECK-SM80-NOF16-NEXT: cvt.f32.f16 %r5, %rs1; +; CHECK-SM80-NOF16-NEXT: max.f32 %r6, %r5, %r4; +; CHECK-SM80-NOF16-NEXT: cvt.rn.f16.f32 %rs6, %r6; +; CHECK-SM80-NOF16-NEXT: mov.b32 %r7, {%rs6, %rs5}; +; CHECK-SM80-NOF16-NEXT: st.param.b32 [func_retval0], %r7; ; CHECK-SM80-NOF16-NEXT: ret; %x = call <2 x half> @llvm.maximumnum.v2f16(<2 x half> %a, <2 x half> %b) ret <2 x half> %x diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll index f871e4039a558..87787ba2bf81c 100644 --- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll +++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll @@ -1452,22 +1452,44 @@ define i16 @reduce_add_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_add_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_add_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0]; -; CHECK-NEXT: add.s32 %r9, %r4, %r8; -; CHECK-NEXT: add.s32 %r10, %r2, %r6; -; CHECK-NEXT: add.s32 %r11, %r10, %r9; -; CHECK-NEXT: add.s32 %r12, %r3, %r7; -; CHECK-NEXT: add.s32 %r13, %r1, %r5; -; CHECK-NEXT: add.s32 %r14, %r13, %r12; -; CHECK-NEXT: add.s32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: 
ret; +; CHECK-SM80-LABEL: reduce_add_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0]; +; CHECK-SM80-NEXT: add.s32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: add.s32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: add.s32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: add.s32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: add.s32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: add.s32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: add.s32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_add_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_add_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_add_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: add.s32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: add.s32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: add.s32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: add.s32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: add.s32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: add.s32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: add.s32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.add(<8 x i32> %in) ret i32 %res } @@ -1543,22 +1565,44 @@ define i16 @reduce_mul_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_mul_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_mul_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, 
%r7, %r8}, [reduce_mul_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i32_param_0]; -; CHECK-NEXT: mul.lo.s32 %r9, %r4, %r8; -; CHECK-NEXT: mul.lo.s32 %r10, %r2, %r6; -; CHECK-NEXT: mul.lo.s32 %r11, %r10, %r9; -; CHECK-NEXT: mul.lo.s32 %r12, %r3, %r7; -; CHECK-NEXT: mul.lo.s32 %r13, %r1, %r5; -; CHECK-NEXT: mul.lo.s32 %r14, %r13, %r12; -; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_mul_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_mul_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i32_param_0]; +; CHECK-SM80-NEXT: mul.lo.s32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: mul.lo.s32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: mul.lo.s32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: mul.lo.s32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: mul.lo.s32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: mul.lo.s32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: mul.lo.s32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_mul_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_mul_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_mul_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: mul.lo.s32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: mul.lo.s32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: mul.lo.s32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: mul.lo.s32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: mul.lo.s32 %r13, %r8, %r6; +; 
CHECK-SM100-NEXT: mul.lo.s32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: mul.lo.s32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.mul(<8 x i32> %in) ret i32 %res } @@ -1673,22 +1717,44 @@ define i16 @reduce_umax_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_umax_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_umax_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0]; -; CHECK-NEXT: max.u32 %r9, %r4, %r8; -; CHECK-NEXT: max.u32 %r10, %r2, %r6; -; CHECK-NEXT: max.u32 %r11, %r10, %r9; -; CHECK-NEXT: max.u32 %r12, %r3, %r7; -; CHECK-NEXT: max.u32 %r13, %r1, %r5; -; CHECK-NEXT: max.u32 %r14, %r13, %r12; -; CHECK-NEXT: max.u32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_umax_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0]; +; CHECK-SM80-NEXT: max.u32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: max.u32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: max.u32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.u32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: max.u32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: max.u32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.u32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_umax_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_umax_i32_param_0+16]; +; 
CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_umax_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: max.u32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: max.u32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: max.u32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: max.u32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: max.u32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: max.u32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: max.u32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.umax(<8 x i32> %in) ret i32 %res } @@ -1803,22 +1869,44 @@ define i16 @reduce_umin_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_umin_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_umin_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0]; -; CHECK-NEXT: min.u32 %r9, %r4, %r8; -; CHECK-NEXT: min.u32 %r10, %r2, %r6; -; CHECK-NEXT: min.u32 %r11, %r10, %r9; -; CHECK-NEXT: min.u32 %r12, %r3, %r7; -; CHECK-NEXT: min.u32 %r13, %r1, %r5; -; CHECK-NEXT: min.u32 %r14, %r13, %r12; -; CHECK-NEXT: min.u32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_umin_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0]; +; CHECK-SM80-NEXT: min.u32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: min.u32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: min.u32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.u32 %r12, %r3, %r7; +; 
CHECK-SM80-NEXT: min.u32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: min.u32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.u32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_umin_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_umin_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_umin_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: min.u32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: min.u32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: min.u32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: min.u32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: min.u32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: min.u32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: min.u32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.umin(<8 x i32> %in) ret i32 %res } @@ -1933,22 +2021,44 @@ define i16 @reduce_smax_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_smax_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_smax_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0]; -; CHECK-NEXT: max.s32 %r9, %r4, %r8; -; CHECK-NEXT: max.s32 %r10, %r2, %r6; -; CHECK-NEXT: max.s32 %r11, %r10, %r9; -; CHECK-NEXT: max.s32 %r12, %r3, %r7; -; CHECK-NEXT: max.s32 %r13, %r1, %r5; -; CHECK-NEXT: max.s32 %r14, %r13, %r12; -; CHECK-NEXT: max.s32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: 
reduce_smax_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0]; +; CHECK-SM80-NEXT: max.s32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: max.s32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: max.s32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: max.s32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: max.s32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: max.s32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: max.s32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_smax_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_smax_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_smax_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: max.s32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: max.s32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: max.s32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: max.s32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: max.s32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: max.s32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: max.s32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.smax(<8 x i32> %in) ret i32 %res } @@ -2063,22 +2173,44 @@ define i16 @reduce_smin_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_smin_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_smin_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, 
[reduce_smin_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0]; -; CHECK-NEXT: min.s32 %r9, %r4, %r8; -; CHECK-NEXT: min.s32 %r10, %r2, %r6; -; CHECK-NEXT: min.s32 %r11, %r10, %r9; -; CHECK-NEXT: min.s32 %r12, %r3, %r7; -; CHECK-NEXT: min.s32 %r13, %r1, %r5; -; CHECK-NEXT: min.s32 %r14, %r13, %r12; -; CHECK-NEXT: min.s32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_smin_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smin_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0]; +; CHECK-SM80-NEXT: min.s32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: min.s32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: min.s32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: min.s32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: min.s32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: min.s32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: min.s32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_smin_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_smin_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_smin_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: min.s32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: min.s32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: min.s32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: min.s32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: min.s32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: min.s32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: 
min.s32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.smin(<8 x i32> %in) ret i32 %res } @@ -2152,22 +2284,44 @@ define i16 @reduce_and_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_and_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_and_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0]; -; CHECK-NEXT: and.b32 %r9, %r4, %r8; -; CHECK-NEXT: and.b32 %r10, %r2, %r6; -; CHECK-NEXT: and.b32 %r11, %r10, %r9; -; CHECK-NEXT: and.b32 %r12, %r3, %r7; -; CHECK-NEXT: and.b32 %r13, %r1, %r5; -; CHECK-NEXT: and.b32 %r14, %r13, %r12; -; CHECK-NEXT: and.b32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_and_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0]; +; CHECK-SM80-NEXT: and.b32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: and.b32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: and.b32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: and.b32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: and.b32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: and.b32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: and.b32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_and_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_and_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_and_i32_param_0]; +; 
CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: and.b32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: and.b32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: and.b32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: and.b32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: and.b32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: and.b32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: and.b32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.and(<8 x i32> %in) ret i32 %res } @@ -2241,22 +2395,44 @@ define i16 @reduce_or_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_or_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_or_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0]; -; CHECK-NEXT: or.b32 %r9, %r4, %r8; -; CHECK-NEXT: or.b32 %r10, %r2, %r6; -; CHECK-NEXT: or.b32 %r11, %r10, %r9; -; CHECK-NEXT: or.b32 %r12, %r3, %r7; -; CHECK-NEXT: or.b32 %r13, %r1, %r5; -; CHECK-NEXT: or.b32 %r14, %r13, %r12; -; CHECK-NEXT: or.b32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_or_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0]; +; CHECK-SM80-NEXT: or.b32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: or.b32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: or.b32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: or.b32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: or.b32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: or.b32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: or.b32 %r15, 
%r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_or_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_or_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_or_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: or.b32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: or.b32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: or.b32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: or.b32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: or.b32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: or.b32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: or.b32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.or(<8 x i32> %in) ret i32 %res } @@ -2330,22 +2506,44 @@ define i16 @reduce_xor_i16_nonpow2(<7 x i16> %in) { } define i32 @reduce_xor_i32(<8 x i32> %in) { -; CHECK-LABEL: reduce_xor_i32( -; CHECK: { -; CHECK-NEXT: .reg .b32 %r<16>; -; CHECK-EMPTY: -; CHECK-NEXT: // %bb.0: -; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16]; -; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0]; -; CHECK-NEXT: xor.b32 %r9, %r4, %r8; -; CHECK-NEXT: xor.b32 %r10, %r2, %r6; -; CHECK-NEXT: xor.b32 %r11, %r10, %r9; -; CHECK-NEXT: xor.b32 %r12, %r3, %r7; -; CHECK-NEXT: xor.b32 %r13, %r1, %r5; -; CHECK-NEXT: xor.b32 %r14, %r13, %r12; -; CHECK-NEXT: xor.b32 %r15, %r14, %r11; -; CHECK-NEXT: st.param.b32 [func_retval0], %r15; -; CHECK-NEXT: ret; +; CHECK-SM80-LABEL: reduce_xor_i32( +; CHECK-SM80: { +; CHECK-SM80-NEXT: .reg .b32 %r<16>; +; CHECK-SM80-EMPTY: +; CHECK-SM80-NEXT: // %bb.0: +; CHECK-SM80-NEXT: 
ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16]; +; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0]; +; CHECK-SM80-NEXT: xor.b32 %r9, %r4, %r8; +; CHECK-SM80-NEXT: xor.b32 %r10, %r2, %r6; +; CHECK-SM80-NEXT: xor.b32 %r11, %r10, %r9; +; CHECK-SM80-NEXT: xor.b32 %r12, %r3, %r7; +; CHECK-SM80-NEXT: xor.b32 %r13, %r1, %r5; +; CHECK-SM80-NEXT: xor.b32 %r14, %r13, %r12; +; CHECK-SM80-NEXT: xor.b32 %r15, %r14, %r11; +; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM80-NEXT: ret; +; +; CHECK-SM100-LABEL: reduce_xor_i32( +; CHECK-SM100: { +; CHECK-SM100-NEXT: .reg .b32 %r<16>; +; CHECK-SM100-NEXT: .reg .b64 %rd<5>; +; CHECK-SM100-EMPTY: +; CHECK-SM100-NEXT: // %bb.0: +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_xor_i32_param_0+16]; +; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_xor_i32_param_0]; +; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd2; +; CHECK-SM100-NEXT: xor.b32 %r5, %r4, %r2; +; CHECK-SM100-NEXT: mov.b64 {%r6, %r7}, %rd3; +; CHECK-SM100-NEXT: mov.b64 {%r8, %r9}, %rd1; +; CHECK-SM100-NEXT: xor.b32 %r10, %r9, %r7; +; CHECK-SM100-NEXT: xor.b32 %r11, %r10, %r5; +; CHECK-SM100-NEXT: xor.b32 %r12, %r3, %r1; +; CHECK-SM100-NEXT: xor.b32 %r13, %r8, %r6; +; CHECK-SM100-NEXT: xor.b32 %r14, %r13, %r12; +; CHECK-SM100-NEXT: xor.b32 %r15, %r14, %r11; +; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r15; +; CHECK-SM100-NEXT: ret; %res = call i32 @llvm.vector.reduce.xor(<8 x i32> %in) ret i32 %res } diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll index aaabd76e163bb..fd0b494d57677 100644 --- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll +++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll @@ -20,18 +20,18 @@ define float @select_oeq_float(float %a, float %b, float %c, float %d) { ; FAST-P8-LABEL: select_oeq_float: ; FAST-P8: # %bb.0: # %entry -; FAST-P8-NEXT: xssubsp f0, f2, f1 -; FAST-P8-NEXT: xssubsp f1, 
f1, f2 -; FAST-P8-NEXT: fsel f1, f1, f3, f4 -; FAST-P8-NEXT: fsel f1, f0, f1, f4 +; FAST-P8-NEXT: xssubsp f0, f1, f2 +; FAST-P8-NEXT: xsnegdp f1, f0 +; FAST-P8-NEXT: fsel f0, f0, f3, f4 +; FAST-P8-NEXT: fsel f1, f1, f0, f4 ; FAST-P8-NEXT: blr ; ; FAST-P9-LABEL: select_oeq_float: ; FAST-P9: # %bb.0: # %entry -; FAST-P9-NEXT: xssubsp f0, f2, f1 -; FAST-P9-NEXT: xssubsp f1, f1, f2 -; FAST-P9-NEXT: fsel f1, f1, f3, f4 -; FAST-P9-NEXT: fsel f1, f0, f1, f4 +; FAST-P9-NEXT: xssubsp f0, f1, f2 +; FAST-P9-NEXT: xsnegdp f1, f0 +; FAST-P9-NEXT: fsel f0, f0, f3, f4 +; FAST-P9-NEXT: fsel f1, f1, f0, f4 ; FAST-P9-NEXT: blr ; ; NO-FAST-P8-LABEL: select_oeq_float: @@ -59,6 +59,48 @@ entry: ret float %cond } +define float @select_oeq_float_nsz(float %a, float %b, float %c, float %d) { +; FAST-P8-LABEL: select_oeq_float_nsz: +; FAST-P8: # %bb.0: # %entry +; FAST-P8-NEXT: xssubsp f0, f2, f1 +; FAST-P8-NEXT: xssubsp f1, f1, f2 +; FAST-P8-NEXT: fsel f1, f1, f3, f4 +; FAST-P8-NEXT: fsel f1, f0, f1, f4 +; FAST-P8-NEXT: blr +; +; FAST-P9-LABEL: select_oeq_float_nsz: +; FAST-P9: # %bb.0: # %entry +; FAST-P9-NEXT: xssubsp f0, f2, f1 +; FAST-P9-NEXT: xssubsp f1, f1, f2 +; FAST-P9-NEXT: fsel f1, f1, f3, f4 +; FAST-P9-NEXT: fsel f1, f0, f1, f4 +; FAST-P9-NEXT: blr +; +; NO-FAST-P8-LABEL: select_oeq_float_nsz: +; NO-FAST-P8: # %bb.0: # %entry +; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P8-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P8-NEXT: # %bb.1: # %entry +; NO-FAST-P8-NEXT: fmr f3, f4 +; NO-FAST-P8-NEXT: .LBB1_2: # %entry +; NO-FAST-P8-NEXT: fmr f1, f3 +; NO-FAST-P8-NEXT: blr +; +; NO-FAST-P9-LABEL: select_oeq_float_nsz: +; NO-FAST-P9: # %bb.0: # %entry +; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P9-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P9-NEXT: # %bb.1: # %entry +; NO-FAST-P9-NEXT: fmr f3, f4 +; NO-FAST-P9-NEXT: .LBB1_2: # %entry +; NO-FAST-P9-NEXT: fmr f1, f3 +; NO-FAST-P9-NEXT: blr +entry: + %cmp = fcmp nsz oeq float %a, %b + %cond = select i1 %cmp, float %c, float %d + ret float %cond +} + 
define double @select_oeq_double(double %a, double %b, double %c, double %d) { ; FAST-P8-LABEL: select_oeq_double: ; FAST-P8: # %bb.0: # %entry @@ -79,20 +121,20 @@ define double @select_oeq_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8-LABEL: select_oeq_double: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P8-NEXT: beq cr0, .LBB2_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB1_2: # %entry +; NO-FAST-P8-NEXT: .LBB2_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_oeq_double: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: beq cr0, .LBB1_2 +; NO-FAST-P9-NEXT: beq cr0, .LBB2_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB1_2: # %entry +; NO-FAST-P9-NEXT: .LBB2_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -182,13 +224,57 @@ entry: define float @select_one_float(float %a, float %b, float %c, float %d) { ; FAST-P8-LABEL: select_one_float: ; FAST-P8: # %bb.0: # %entry +; FAST-P8-NEXT: xssubsp f0, f1, f2 +; FAST-P8-NEXT: xsnegdp f1, f0 +; FAST-P8-NEXT: fsel f0, f0, f4, f3 +; FAST-P8-NEXT: fsel f1, f1, f0, f3 +; FAST-P8-NEXT: blr +; +; FAST-P9-LABEL: select_one_float: +; FAST-P9: # %bb.0: # %entry +; FAST-P9-NEXT: xssubsp f0, f1, f2 +; FAST-P9-NEXT: xsnegdp f1, f0 +; FAST-P9-NEXT: fsel f0, f0, f4, f3 +; FAST-P9-NEXT: fsel f1, f1, f0, f3 +; FAST-P9-NEXT: blr +; +; NO-FAST-P8-LABEL: select_one_float: +; NO-FAST-P8: # %bb.0: # %entry +; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P8-NEXT: # %bb.1: # %entry +; NO-FAST-P8-NEXT: fmr f3, f4 +; NO-FAST-P8-NEXT: .LBB5_2: # %entry +; NO-FAST-P8-NEXT: fmr f1, f3 +; NO-FAST-P8-NEXT: blr +; +; NO-FAST-P9-LABEL: select_one_float: +; NO-FAST-P9: # 
%bb.0: # %entry +; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 +; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P9-NEXT: # %bb.1: # %entry +; NO-FAST-P9-NEXT: fmr f3, f4 +; NO-FAST-P9-NEXT: .LBB5_2: # %entry +; NO-FAST-P9-NEXT: fmr f1, f3 +; NO-FAST-P9-NEXT: blr +entry: + %cmp = fcmp one float %a, %b + %cond = select i1 %cmp, float %c, float %d + ret float %cond +} + +define float @select_one_float_nsz(float %a, float %b, float %c, float %d) { +; FAST-P8-LABEL: select_one_float_nsz: +; FAST-P8: # %bb.0: # %entry ; FAST-P8-NEXT: xssubsp f0, f2, f1 ; FAST-P8-NEXT: xssubsp f1, f1, f2 ; FAST-P8-NEXT: fsel f1, f1, f4, f3 ; FAST-P8-NEXT: fsel f1, f0, f1, f3 ; FAST-P8-NEXT: blr ; -; FAST-P9-LABEL: select_one_float: +; FAST-P9-LABEL: select_one_float_nsz: ; FAST-P9: # %bb.0: # %entry ; FAST-P9-NEXT: xssubsp f0, f2, f1 ; FAST-P9-NEXT: xssubsp f1, f1, f2 @@ -196,29 +282,29 @@ define float @select_one_float(float %a, float %b, float %c, float %d) { ; FAST-P9-NEXT: fsel f1, f0, f1, f3 ; FAST-P9-NEXT: blr ; -; NO-FAST-P8-LABEL: select_one_float: +; NO-FAST-P8-LABEL: select_one_float_nsz: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB4_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB4_2: # %entry +; NO-FAST-P8-NEXT: .LBB6_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; -; NO-FAST-P9-LABEL: select_one_float: +; NO-FAST-P9-LABEL: select_one_float_nsz: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB4_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB4_2: # %entry +; NO-FAST-P9-NEXT: .LBB6_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; 
NO-FAST-P9-NEXT: blr entry: - %cmp = fcmp one float %a, %b + %cmp = fcmp nsz one float %a, %b %cond = select i1 %cmp, float %c, float %d ret float %cond } @@ -244,10 +330,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB5_2: # %entry +; NO-FAST-P8-NEXT: .LBB7_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -255,10 +341,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB7_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB5_2: # %entry +; NO-FAST-P9-NEXT: .LBB7_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -362,10 +448,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB8_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB8_2: # %entry +; NO-FAST-P8-NEXT: .LBB10_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -373,10 +459,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB8_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; 
NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB8_2: # %entry +; NO-FAST-P9-NEXT: .LBB10_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -402,10 +488,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB9_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB9_2: # %entry +; NO-FAST-P8-NEXT: .LBB11_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -413,10 +499,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB9_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB9_2: # %entry +; NO-FAST-P9-NEXT: .LBB11_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -503,20 +589,20 @@ define float @select_olt_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8-LABEL: select_olt_float: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: blt cr0, .LBB12_2 +; NO-FAST-P8-NEXT: blt cr0, .LBB14_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB12_2: # %entry +; NO-FAST-P8-NEXT: .LBB14_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_olt_float: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P9-NEXT: blt cr0, .LBB12_2 +; NO-FAST-P9-NEXT: blt cr0, .LBB14_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB12_2: # %entry +; NO-FAST-P9-NEXT: .LBB14_2: # %entry ; NO-FAST-P9-NEXT: 
fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -541,20 +627,20 @@ define double @select_olt_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8-LABEL: select_olt_double: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: blt cr0, .LBB13_2 +; NO-FAST-P8-NEXT: blt cr0, .LBB15_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB13_2: # %entry +; NO-FAST-P8-NEXT: .LBB15_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_olt_double: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: blt cr0, .LBB13_2 +; NO-FAST-P9-NEXT: blt cr0, .LBB15_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB13_2: # %entry +; NO-FAST-P9-NEXT: .LBB15_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -641,20 +727,20 @@ define float @select_ogt_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8-LABEL: select_ogt_float: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: bgt cr0, .LBB16_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB18_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB16_2: # %entry +; NO-FAST-P8-NEXT: .LBB18_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_ogt_float: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P9-NEXT: bgt cr0, .LBB16_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB18_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB16_2: # %entry +; NO-FAST-P9-NEXT: .LBB18_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -679,20 +765,20 @@ define double @select_ogt_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8-LABEL: select_ogt_double: ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 
-; NO-FAST-P8-NEXT: bgt cr0, .LBB17_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB19_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB17_2: # %entry +; NO-FAST-P8-NEXT: .LBB19_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; ; NO-FAST-P9-LABEL: select_ogt_double: ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P9-NEXT: bgt cr0, .LBB17_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB19_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB17_2: # %entry +; NO-FAST-P9-NEXT: .LBB19_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -780,10 +866,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB20_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB20_2: # %entry +; NO-FAST-P8-NEXT: .LBB22_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -791,10 +877,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB20_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB20_2: # %entry +; NO-FAST-P9-NEXT: .LBB22_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -820,10 +906,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P8: # %bb.0: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB21_2 +; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2 ; NO-FAST-P8-NEXT: # 
%bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f3, f4 -; NO-FAST-P8-NEXT: .LBB21_2: # %entry +; NO-FAST-P8-NEXT: .LBB23_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -831,10 +917,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) { ; NO-FAST-P9: # %bb.0: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2 ; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt -; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB21_2 +; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f3, f4 -; NO-FAST-P9-NEXT: .LBB21_2: # %entry +; NO-FAST-P9-NEXT: .LBB23_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -926,13 +1012,13 @@ define double @onecmp1(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f0 -; NO-FAST-P8-NEXT: bc 12, lt, .LBB24_3 +; NO-FAST-P8-NEXT: bc 12, lt, .LBB26_3 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fcmpu cr0, f1, f1 -; NO-FAST-P8-NEXT: bc 12, un, .LBB24_3 +; NO-FAST-P8-NEXT: bc 12, un, .LBB26_3 ; NO-FAST-P8-NEXT: # %bb.2: # %entry ; NO-FAST-P8-NEXT: fmr f3, f2 -; NO-FAST-P8-NEXT: .LBB24_3: # %entry +; NO-FAST-P8-NEXT: .LBB26_3: # %entry ; NO-FAST-P8-NEXT: fmr f1, f3 ; NO-FAST-P8-NEXT: blr ; @@ -941,13 +1027,13 @@ define double @onecmp1(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f0 -; NO-FAST-P9-NEXT: bc 12, lt, .LBB24_3 +; NO-FAST-P9-NEXT: bc 12, lt, .LBB26_3 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fcmpu cr0, f1, f1 -; NO-FAST-P9-NEXT: bc 12, un, .LBB24_3 +; NO-FAST-P9-NEXT: bc 12, un, .LBB26_3 ; NO-FAST-P9-NEXT: # %bb.2: # %entry ; NO-FAST-P9-NEXT: fmr f3, f2 -; NO-FAST-P9-NEXT: .LBB24_3: # %entry +; NO-FAST-P9-NEXT: .LBB26_3: # %entry ; NO-FAST-P9-NEXT: fmr f1, f3 ; NO-FAST-P9-NEXT: blr entry: @@ -978,10 +1064,10 @@ define double @onecmp2(double %a, 
double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P8-NEXT: bgt cr0, .LBB25_2 +; NO-FAST-P8-NEXT: bgt cr0, .LBB27_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f2, f3 -; NO-FAST-P8-NEXT: .LBB25_2: # %entry +; NO-FAST-P8-NEXT: .LBB27_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f2 ; NO-FAST-P8-NEXT: blr ; @@ -990,10 +1076,10 @@ define double @onecmp2(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P9-NEXT: bgt cr0, .LBB25_2 +; NO-FAST-P9-NEXT: bgt cr0, .LBB27_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f2, f3 -; NO-FAST-P9-NEXT: .LBB25_2: # %entry +; NO-FAST-P9-NEXT: .LBB27_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f2 ; NO-FAST-P9-NEXT: blr entry: @@ -1028,10 +1114,10 @@ define double @onecmp3(double %a, double %y, double %z) { ; NO-FAST-P8-NEXT: vspltisw v2, 1 ; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P8-NEXT: beq cr0, .LBB26_2 +; NO-FAST-P8-NEXT: beq cr0, .LBB28_2 ; NO-FAST-P8-NEXT: # %bb.1: # %entry ; NO-FAST-P8-NEXT: fmr f2, f3 -; NO-FAST-P8-NEXT: .LBB26_2: # %entry +; NO-FAST-P8-NEXT: .LBB28_2: # %entry ; NO-FAST-P8-NEXT: fmr f1, f2 ; NO-FAST-P8-NEXT: blr ; @@ -1040,10 +1126,10 @@ define double @onecmp3(double %a, double %y, double %z) { ; NO-FAST-P9-NEXT: vspltisw v2, 1 ; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34 ; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0 -; NO-FAST-P9-NEXT: beq cr0, .LBB26_2 +; NO-FAST-P9-NEXT: beq cr0, .LBB28_2 ; NO-FAST-P9-NEXT: # %bb.1: # %entry ; NO-FAST-P9-NEXT: fmr f2, f3 -; NO-FAST-P9-NEXT: .LBB26_2: # %entry +; NO-FAST-P9-NEXT: .LBB28_2: # %entry ; NO-FAST-P9-NEXT: fmr f1, f2 ; NO-FAST-P9-NEXT: blr entry: diff --git a/llvm/test/CodeGen/PowerPC/vector-all-ones.ll b/llvm/test/CodeGen/PowerPC/vector-all-ones.ll new file mode 100644 index 0000000000000..e4c93adcf50a6 --- 
/dev/null +++ b/llvm/test/CodeGen/PowerPC/vector-all-ones.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s + +; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc64-ibm-aix \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s + +; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 -mtriple=powerpc-ibm-aix \ +; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s + +; Currently the generated code uses `vspltisw` to generate vector of 1s followed by add operation. +; This pattern is expected to be optimized in a future patch by using `xxleqv` to generate vector of -1s +; followed by subtraction operation. +define dso_local noundef <4 x i32> @test1(<4 x i32> %a) { +; CHECK-LABEL: test1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vspltisw v3, 1 +; CHECK-NEXT: vadduwm v2, v2, v3 +; CHECK-NEXT: blr +entry: + %add = add <4 x i32> %a, splat (i32 1) + ret <4 x i32> %add +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll index 7f387a763b6da..23f660bb026a7 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-intrinsics.ll @@ -10,7 +10,7 @@ ; RUN: | FileCheck -check-prefix=RV64IF %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d \ ; RUN: -target-abi=lp64d \ -; RUN: | FileCheck -check-prefix=RV64IF %s +; RUN: | FileCheck -check-prefix=RV64IFD %s ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel \ ; RUN: | FileCheck -check-prefix=RV32I %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel \ @@ -27,6 +27,11 @@ define float @sqrt_f32(float %a) nounwind { ; RV64IF-NEXT: fsqrt.s fa0, fa0 ; RV64IF-NEXT: ret ; +; 
RV64IFD-LABEL: sqrt_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fsqrt.s fa0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sqrt_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -68,6 +73,16 @@ define float @powi_f32(float %a, i32 %b) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: powi_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: sext.w a0, a0 +; RV64IFD-NEXT: call __powisf2 +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: powi_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -109,6 +124,15 @@ define float @sin_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: sin_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call sinf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sin_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -149,6 +173,15 @@ define float @cos_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: cos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call cosf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: cos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -190,6 +223,42 @@ define float @sincos_f32(float %a) nounwind { ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; +; RV64IF-LABEL: sincos_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill +; RV64IF-NEXT: fsw fs1, 0(sp) # 4-byte Folded Spill +; RV64IF-NEXT: fmv.s fs0, fa0 +; 
RV64IF-NEXT: call sinf +; RV64IF-NEXT: fmv.s fs1, fa0 +; RV64IF-NEXT: fmv.s fa0, fs0 +; RV64IF-NEXT: call cosf +; RV64IF-NEXT: fadd.s fa0, fs1, fa0 +; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload +; RV64IF-NEXT: flw fs1, 0(sp) # 4-byte Folded Reload +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; +; RV64IFD-LABEL: sincos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -32 +; RV64IFD-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fmv.s fs0, fa0 +; RV64IFD-NEXT: call sinf +; RV64IFD-NEXT: fmv.s fs1, fa0 +; RV64IFD-NEXT: fmv.s fa0, fs0 +; RV64IFD-NEXT: call cosf +; RV64IFD-NEXT: fadd.s fa0, fs1, fa0 +; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 32 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sincos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -254,6 +323,15 @@ define float @pow_f32(float %a, float %b) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: pow_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call powf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: pow_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -294,6 +372,15 @@ define float @exp_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: exp_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call expf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: exp_f32: ; RV32I: # %bb.0: ; 
RV32I-NEXT: addi sp, sp, -16 @@ -334,6 +421,15 @@ define float @exp2_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: exp2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call exp2f +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: exp2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -374,6 +470,15 @@ define float @exp10_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: exp10_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call exp10f +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: exp10_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -414,6 +519,15 @@ define float @log_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: log_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call logf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: log_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -454,6 +568,15 @@ define float @log10_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: log10_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call log10f +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: log10_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -494,6 +617,15 @@ define float @log2_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: 
ret ; +; RV64IFD-LABEL: log2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call log2f +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: log2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -526,6 +658,11 @@ define float @fma_f32(float %a, float %b, float %c) nounwind { ; RV64IF-NEXT: fmadd.s fa0, fa0, fa1, fa2 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: fma_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmadd.s fa0, fa0, fa1, fa2 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fma_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -558,6 +695,11 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind { ; RV64IF-NEXT: fmadd.s fa0, fa0, fa1, fa2 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: fmuladd_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmadd.s fa0, fa0, fa1, fa2 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fmuladd_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -600,6 +742,11 @@ define float @fabs_f32(float %a) nounwind { ; RV64IF-NEXT: fabs.s fa0, fa0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: fabs_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fabs.s fa0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fabs_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -626,6 +773,11 @@ define float @minnum_f32(float %a, float %b) nounwind { ; RV64IF-NEXT: fmin.s fa0, fa0, fa1 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: minnum_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmin.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: minnum_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -658,6 +810,11 @@ define float @maxnum_f32(float %a, float %b) nounwind { ; RV64IF-NEXT: fmax.s fa0, fa0, fa1 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: maxnum_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmax.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: maxnum_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -690,6 +847,11 @@ 
define float @copysign_f32(float %a, float %b) nounwind { ; RV64IF-NEXT: fsgnj.s fa0, fa0, fa1 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: copysign_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fsgnj.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: copysign_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 524288 @@ -730,6 +892,15 @@ define float @ceil_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: ceil_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call ceilf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: ceil_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -770,6 +941,15 @@ define float @trunc_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: trunc_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call truncf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: trunc_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -810,6 +990,15 @@ define float @rint_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: rint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call rintf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: rint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -850,6 +1039,15 @@ define float @nearbyint_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: nearbyint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call nearbyintf 
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: nearbyint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -890,6 +1088,15 @@ define float @round_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: round_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call roundf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: round_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -930,6 +1137,15 @@ define float @roundeven_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: roundeven_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call roundevenf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: roundeven_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -966,6 +1182,13 @@ define i1 @fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 927 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1039,6 +1262,13 @@ define i1 @isnan_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 768 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1073,6 +1303,13 @@ define i1 @isqnan_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isqnan_fpclass: +; RV64IFD: 
# %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 512 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isqnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -1109,6 +1346,13 @@ define i1 @issnan_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: issnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 256 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: issnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1149,6 +1393,13 @@ define i1 @isinf_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isinf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 129 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isinf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1185,6 +1436,13 @@ define i1 @isposinf_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isposinf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 128 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isposinf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1218,6 +1476,13 @@ define i1 @isneginf_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isneginf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 1 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isneginf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 1046528 @@ -1251,6 +1516,13 @@ define i1 @isfinite_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 126 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isfinite_fpclass: ; RV32I: 
# %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1285,6 +1557,13 @@ define i1 @isposfinite_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isposfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 112 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isposfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1316,6 +1595,13 @@ define i1 @isnegfinite_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isnegfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 14 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnegfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1357,6 +1643,13 @@ define i1 @isnotfinite_fpclass(float %x) { ; RV64IF-NEXT: snez a0, a0 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: isnotfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 897 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnotfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1401,6 +1694,15 @@ define float @tan_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: tan_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call tanf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: tan_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1442,6 +1744,16 @@ define float @ldexp_float(float %x, i32 %y) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: ldexp_float: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: sext.w a0, a0 +; RV64IFD-NEXT: call ldexpf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte 
Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: ldexp_float: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1483,6 +1795,15 @@ define float @asin_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: asin_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call asinf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: asin_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1523,6 +1844,15 @@ define float @acos_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: acos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call acosf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: acos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1563,6 +1893,15 @@ define float @atan_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: atan_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call atanf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: atan_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1603,6 +1942,15 @@ define float @atan2_f32(float %a, float %b) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: atan2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call atan2f +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: atan2_f32: ; RV32I: # %bb.0: ; 
RV32I-NEXT: addi sp, sp, -16 @@ -1643,6 +1991,15 @@ define float @sinh_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: sinh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call sinhf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sinh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1683,6 +2040,15 @@ define float @cosh_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: cosh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call coshf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: cosh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1723,6 +2089,15 @@ define float @tanh_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; +; RV64IFD-LABEL: tanh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call tanhf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: tanh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 diff --git a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll index 6608874286e34..f8b1d505f4e81 100644 --- a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll +++ b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll @@ -6,9 +6,9 @@ ; RUN: llc -mtriple=riscv64 -mattr=+conditional-cmv-fusion,+c,+zicond -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefixes=CMOV,CMOV-ZICOND %s ; RUN: llc -mtriple=riscv64 -mattr=+short-forward-branch-opt -verify-machineinstrs < %s \ -; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND %s +; 
RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND,SFB-NOZICOND-NOC %s ; RUN: llc -mtriple=riscv64 -mattr=+short-forward-branch-opt,+c -verify-machineinstrs < %s \ -; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND %s +; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND,SFB-NOZICOND-C %s ; RUN: llc -mtriple=riscv64 -mattr=+short-forward-branch-opt,+zicond -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-ZICOND %s @@ -263,6 +263,24 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) { ; CMOV-NEXT: .LBB6_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_xor_2: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB6_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: xor a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB6_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_xor_2: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB6_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: xor a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB6_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_xor_2: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -300,6 +318,24 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) { ; CMOV-NEXT: .LBB7_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_xor_2b: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB7_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: xor a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB7_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_xor_2b: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB7_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; 
SFB-NOZICOND-C-NEXT: xor a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB7_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_xor_2b: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -335,6 +371,24 @@ define i32 @select_or(i32 %A, i32 %B, i8 %cond) { ; CMOV-NEXT: .LBB8_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_or: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB8_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: or a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB8_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_or: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB8_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: or a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB8_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_or: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -372,6 +426,24 @@ define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) { ; CMOV-NEXT: .LBB9_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_or_b: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB9_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: or a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB9_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_or_b: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB9_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: or a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB9_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_or_b: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -407,6 +479,24 @@ define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) { ; CMOV-NEXT: 
.LBB10_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_or_1: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB10_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: or a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB10_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_or_1: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB10_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: or a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB10_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_or_1: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 @@ -444,6 +534,24 @@ define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) { ; CMOV-NEXT: .LBB11_2: # %entry ; CMOV-NEXT: ret ; +; SFB-NOZICOND-NOC-LABEL: select_or_1b: +; SFB-NOZICOND-NOC: # %bb.0: # %entry +; SFB-NOZICOND-NOC-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-NOC-NEXT: beqz a2, .LBB11_2 +; SFB-NOZICOND-NOC-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-NOC-NEXT: or a0, a1, a0 +; SFB-NOZICOND-NOC-NEXT: .LBB11_2: # %entry +; SFB-NOZICOND-NOC-NEXT: ret +; +; SFB-NOZICOND-C-LABEL: select_or_1b: +; SFB-NOZICOND-C: # %bb.0: # %entry +; SFB-NOZICOND-C-NEXT: andi a2, a2, 1 +; SFB-NOZICOND-C-NEXT: beqz a2, .LBB11_2 +; SFB-NOZICOND-C-NEXT: # %bb.1: # %entry +; SFB-NOZICOND-C-NEXT: or a0, a0, a1 +; SFB-NOZICOND-C-NEXT: .LBB11_2: # %entry +; SFB-NOZICOND-C-NEXT: ret +; ; SFB-ZICOND-LABEL: select_or_1b: ; SFB-ZICOND: # %bb.0: # %entry ; SFB-ZICOND-NEXT: andi a2, a2, 1 diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll index 1deea55b083ce..cd3ff779d8cd3 100644 --- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc 
-mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ -; RUN: -target-abi=ilp32d | FileCheck %s +; RUN: -target-abi=ilp32d | FileCheck --check-prefixes=CHECK,RV32D %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ -; RUN: -target-abi=lp64d | FileCheck %s +; RUN: -target-abi=lp64d | FileCheck --check-prefixes=CHECK,RV64D %s ; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=ilp32 | FileCheck --check-prefix=CHECKRV32ZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ @@ -640,6 +640,39 @@ define signext i32 @select_fcmp_uge_1_2(double %a, double %b) nounwind { } define double @CascadedSelect(double noundef %a) { +; RV32D-LABEL: CascadedSelect: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: lui a0, %hi(.LCPI20_0) +; RV32D-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; RV32D-NEXT: flt.d a0, fa5, fa0 +; RV32D-NEXT: bnez a0, .LBB20_3 +; RV32D-NEXT: # %bb.1: # %entry +; RV32D-NEXT: fcvt.d.w fa5, zero +; RV32D-NEXT: flt.d a0, fa0, fa5 +; RV32D-NEXT: bnez a0, .LBB20_3 +; RV32D-NEXT: # %bb.2: # %entry +; RV32D-NEXT: fmv.d fa5, fa0 +; RV32D-NEXT: .LBB20_3: # %entry +; RV32D-NEXT: fmv.d fa0, fa5 +; RV32D-NEXT: ret +; +; RV64D-LABEL: CascadedSelect: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: li a0, 1023 +; RV64D-NEXT: slli a0, a0, 52 +; RV64D-NEXT: fmv.d.x fa5, a0 +; RV64D-NEXT: flt.d a0, fa5, fa0 +; RV64D-NEXT: bnez a0, .LBB20_3 +; RV64D-NEXT: # %bb.1: # %entry +; RV64D-NEXT: fmv.d.x fa5, zero +; RV64D-NEXT: flt.d a0, fa0, fa5 +; RV64D-NEXT: bnez a0, .LBB20_3 +; RV64D-NEXT: # %bb.2: # %entry +; RV64D-NEXT: fmv.d fa5, fa0 +; RV64D-NEXT: .LBB20_3: # %entry +; RV64D-NEXT: fmv.d fa0, fa5 +; RV64D-NEXT: ret +; ; CHECKRV32ZDINX-LABEL: CascadedSelect: ; CHECKRV32ZDINX: # %bb.0: # %entry ; CHECKRV32ZDINX-NEXT: lui a3, %hi(.LCPI20_0) diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll index 8b8a3257a0027..b1230ae9dd6bf 100644 --- 
a/llvm/test/CodeGen/RISCV/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll @@ -16,7 +16,7 @@ ; RUN: | FileCheck -check-prefix=RV64IZFINX %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \ ; RUN: -verify-machineinstrs -target-abi=lp64d \ -; RUN: | FileCheck -check-prefix=RV64IF %s +; RUN: | FileCheck -check-prefixes=RV64IFD %s ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \ ; RUN: -verify-machineinstrs | FileCheck -check-prefix=RV32I %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \ @@ -45,6 +45,11 @@ define float @sqrt_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fsqrt.s a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: sqrt_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fsqrt.s fa0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sqrt_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -97,6 +102,16 @@ define float @powi_f32(float %a, i32 %b) nounwind { ; RV64IZFINX-NEXT: addi sp, sp, 16 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: powi_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: sext.w a0, a0 +; RV64IFD-NEXT: call __powisf2 +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: powi_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -138,6 +153,10 @@ define float @sin_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail sinf ; +; RV64IFD-LABEL: sin_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail sinf +; ; RV32I-LABEL: sin_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -178,6 +197,10 @@ define float @cos_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail cosf ; +; RV64IFD-LABEL: cos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail cosf +; ; RV32I-LABEL: cos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -237,6 +260,24 @@ define float @sincos_f32(float %a) nounwind { ; RV32IZFINX-NEXT: addi sp, sp, 16 ; 
RV32IZFINX-NEXT: ret ; +; RV64IF-LABEL: sincos_f32: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill +; RV64IF-NEXT: fsw fs1, 0(sp) # 4-byte Folded Spill +; RV64IF-NEXT: fmv.s fs0, fa0 +; RV64IF-NEXT: call sinf +; RV64IF-NEXT: fmv.s fs1, fa0 +; RV64IF-NEXT: fmv.s fa0, fs0 +; RV64IF-NEXT: call cosf +; RV64IF-NEXT: fadd.s fa0, fs1, fa0 +; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload +; RV64IF-NEXT: flw fs1, 0(sp) # 4-byte Folded Reload +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; ; RV64IZFINX-LABEL: sincos_f32: ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: addi sp, sp, -32 @@ -255,6 +296,24 @@ define float @sincos_f32(float %a) nounwind { ; RV64IZFINX-NEXT: addi sp, sp, 32 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: sincos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -32 +; RV64IFD-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: fmv.s fs0, fa0 +; RV64IFD-NEXT: call sinf +; RV64IFD-NEXT: fmv.s fs1, fa0 +; RV64IFD-NEXT: fmv.s fa0, fs0 +; RV64IFD-NEXT: call cosf +; RV64IFD-NEXT: fadd.s fa0, fs1, fa0 +; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 32 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: sincos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -319,6 +378,10 @@ define float @pow_f32(float %a, float %b) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail powf ; +; RV64IFD-LABEL: pow_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail powf +; ; RV32I-LABEL: pow_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -359,6 +422,10 @@ define float @exp_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail 
expf ; +; RV64IFD-LABEL: exp_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail expf +; ; RV32I-LABEL: exp_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -399,6 +466,10 @@ define float @exp2_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail exp2f ; +; RV64IFD-LABEL: exp2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail exp2f +; ; RV32I-LABEL: exp2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -437,6 +508,10 @@ define float @exp10_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail exp10f ; +; RV64IFD-LABEL: exp10_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail exp10f +; ; RV32I-LABEL: exp10_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -477,6 +552,10 @@ define float @log_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail logf ; +; RV64IFD-LABEL: log_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail logf +; ; RV32I-LABEL: log_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -517,6 +596,10 @@ define float @log10_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail log10f ; +; RV64IFD-LABEL: log10_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail log10f +; ; RV32I-LABEL: log10_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -557,6 +640,10 @@ define float @log2_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail log2f ; +; RV64IFD-LABEL: log2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail log2f +; ; RV32I-LABEL: log2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -601,6 +688,11 @@ define float @fma_f32(float %a, float %b, float %c) nounwind { ; RV64IZFINX-NEXT: fmadd.s a0, a0, a1, a2 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: fma_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmadd.s fa0, fa0, fa1, fa2 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fma_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -645,6 +737,11 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind { ; RV64IZFINX-NEXT: fmadd.s a0, a0, a1, 
a2 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: fmuladd_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmadd.s fa0, fa0, fa1, fa2 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fmuladd_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -699,6 +796,11 @@ define float @fabs_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fabs.s a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: fabs_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fabs.s fa0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fabs_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -737,6 +839,11 @@ define float @minnum_f32(float %a, float %b) nounwind { ; RV64IZFINX-NEXT: fmin.s a0, a0, a1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: minnum_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmin.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: minnum_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -781,6 +888,11 @@ define float @maxnum_f32(float %a, float %b) nounwind { ; RV64IZFINX-NEXT: fmax.s a0, a0, a1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: maxnum_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmax.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: maxnum_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -825,6 +937,11 @@ define float @copysign_f32(float %a, float %b) nounwind { ; RV64IZFINX-NEXT: fsgnj.s a0, a0, a1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: copysign_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fsgnj.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: copysign_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 524288 @@ -903,6 +1020,20 @@ define float @floor_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB18_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: floor_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB18_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rdn +; RV64IFD-NEXT: fcvt.s.w fa5, a0, rdn +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 
+; RV64IFD-NEXT: .LBB18_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: floor_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -981,6 +1112,20 @@ define float @ceil_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB19_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: ceil_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB19_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rup +; RV64IFD-NEXT: fcvt.s.w fa5, a0, rup +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB19_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: ceil_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1059,6 +1204,20 @@ define float @trunc_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB20_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: trunc_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB20_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rtz +; RV64IFD-NEXT: fcvt.s.w fa5, a0, rtz +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB20_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: trunc_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1137,6 +1296,20 @@ define float @rint_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB21_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: rint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB21_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0 +; RV64IFD-NEXT: fcvt.s.w fa5, a0 +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB21_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: rint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1177,6 +1350,10 @@ define float @nearbyint_f32(float %a) 
nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail nearbyintf ; +; RV64IFD-LABEL: nearbyint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail nearbyintf +; ; RV32I-LABEL: nearbyint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1255,6 +1432,20 @@ define float @round_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB23_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: round_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB23_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rmm +; RV64IFD-NEXT: fcvt.s.w fa5, a0, rmm +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB23_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: round_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1333,6 +1524,20 @@ define float @roundeven_f32(float %a) nounwind { ; RV64IZFINX-NEXT: .LBB24_2: ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: roundeven_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: lui a0, 307200 +; RV64IFD-NEXT: fmv.w.x fa5, a0 +; RV64IFD-NEXT: fabs.s fa4, fa0 +; RV64IFD-NEXT: flt.s a0, fa4, fa5 +; RV64IFD-NEXT: beqz a0, .LBB24_2 +; RV64IFD-NEXT: # %bb.1: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rne +; RV64IFD-NEXT: fcvt.s.w fa5, a0, rne +; RV64IFD-NEXT: fsgnj.s fa0, fa5, fa0 +; RV64IFD-NEXT: .LBB24_2: +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: roundeven_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1377,6 +1582,11 @@ define iXLen @lrint_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fcvt.l.s a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: lrint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.l.s a0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: lrint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1422,6 +1632,11 @@ define iXLen @lround_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rmm ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: lround_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.l.s a0, 
fa0, rmm +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: lround_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1466,6 +1681,11 @@ define i32 @lround_i32_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rmm ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: lround_i32_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.w.s a0, fa0, rmm +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: lround_i32_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1518,6 +1738,11 @@ define i64 @llrint_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fcvt.l.s a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: llrint_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.l.s a0, fa0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: llrint_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1570,6 +1795,11 @@ define i64 @llround_f32(float %a) nounwind { ; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rmm ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: llround_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fcvt.l.s a0, fa0, rmm +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: llround_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1621,6 +1851,13 @@ define i1 @fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 927 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a1, a0, 1 @@ -1705,6 +1942,13 @@ define i1 @isnan_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 768 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -1749,6 +1993,12 @@ define i1 @isqnan_fpclass(float %x) { ; RV64IZFINX-NEXT: srli a0, a0, 9 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isqnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s 
a0, fa0 +; RV64IFD-NEXT: srli a0, a0, 9 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isqnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -1799,6 +2049,13 @@ define i1 @issnan_fpclass(float %x) { ; RV64IZFINX-NEXT: srli a0, a0, 63 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: issnan_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: slli a0, a0, 55 +; RV64IFD-NEXT: srli a0, a0, 63 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: issnan_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -1853,6 +2110,13 @@ define i1 @isinf_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isinf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 129 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isinf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -1903,6 +2167,13 @@ define i1 @isposinf_fpclass(float %x) { ; RV64IZFINX-NEXT: srli a0, a0, 63 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isposinf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: slli a0, a0, 56 +; RV64IFD-NEXT: srli a0, a0, 63 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isposinf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 522240 @@ -1946,6 +2217,12 @@ define i1 @isneginf_fpclass(float %x) { ; RV64IZFINX-NEXT: andi a0, a0, 1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isneginf_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isneginf_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 1046528 @@ -1993,6 +2270,13 @@ define i1 @isfinite_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 126 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 
@@ -2041,6 +2325,13 @@ define i1 @isposfinite_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isposfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 112 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isposfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 23 @@ -2085,6 +2376,13 @@ define i1 @isnegfinite_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isnegfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 14 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnegfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a1, a0, 1 @@ -2137,6 +2435,13 @@ define i1 @isnotfinite_fpclass(float %x) { ; RV64IZFINX-NEXT: snez a0, a0 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: isnotfinite_fpclass: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fclass.s a0, fa0 +; RV64IFD-NEXT: andi a0, a0, 897 +; RV64IFD-NEXT: snez a0, a0 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: isnotfinite_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 @@ -2175,6 +2480,10 @@ define float @tan_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail tanf ; +; RV64IFD-LABEL: tan_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail tanf +; ; RV32I-LABEL: tan_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2219,6 +2528,11 @@ define float @maximumnum_float(float %x, float %y) { ; RV64IZFINX-NEXT: fmax.s a0, a0, a1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: maximumnum_float: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmax.s fa0, fa0, fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: maximumnum_float: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2271,6 +2585,11 @@ define float @minimumnum_float(float %x, float %y) { ; RV64IZFINX-NEXT: fmin.s a0, a0, a1 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: minimumnum_float: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmin.s fa0, fa0, 
fa1 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: minimumnum_float: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2327,6 +2646,15 @@ define float @ldexp_float(float %x, i32 signext %y) nounwind { ; RV64IZFINX-NEXT: addi sp, sp, 16 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: ldexp_float: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call ldexpf +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: ldexp_float: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2393,6 +2721,17 @@ define {float, i32} @frexp_float(float %x) nounwind { ; RV64IZFINX-NEXT: addi sp, sp, 16 ; RV64IZFINX-NEXT: ret ; +; RV64IFD-LABEL: frexp_float: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: mv a0, sp +; RV64IFD-NEXT: call frexpf +; RV64IFD-NEXT: ld a0, 0(sp) +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; ; RV32I-LABEL: frexp_float: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2435,6 +2774,10 @@ define float @asin_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail asinf ; +; RV64IFD-LABEL: asin_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail asinf +; ; RV32I-LABEL: asin_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2473,6 +2816,10 @@ define float @acos_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail acosf ; +; RV64IFD-LABEL: acos_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail acosf +; ; RV32I-LABEL: acos_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2511,6 +2858,10 @@ define float @atan_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail atanf ; +; RV64IFD-LABEL: atan_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail atanf +; ; RV32I-LABEL: atan_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2549,6 
+2900,10 @@ define float @atan2_f32(float %a, float %b) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail atan2f ; +; RV64IFD-LABEL: atan2_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail atan2f +; ; RV32I-LABEL: atan2_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2587,6 +2942,10 @@ define float @sinh_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail sinhf ; +; RV64IFD-LABEL: sinh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail sinhf +; ; RV32I-LABEL: sinh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2625,6 +2984,10 @@ define float @cosh_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail coshf ; +; RV64IFD-LABEL: cosh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail coshf +; ; RV32I-LABEL: cosh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2663,6 +3026,10 @@ define float @tanh_f32(float %a) nounwind { ; RV64IZFINX: # %bb.0: ; RV64IZFINX-NEXT: tail tanhf ; +; RV64IFD-LABEL: tanh_f32: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: tail tanhf +; ; RV32I-LABEL: tanh_f32: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 diff --git a/llvm/test/CodeGen/RISCV/idiv_large.ll b/llvm/test/CodeGen/RISCV/idiv_large.ll index fb7e4a4d103d0..9937627962208 100644 --- a/llvm/test/CodeGen/RISCV/idiv_large.ll +++ b/llvm/test/CodeGen/RISCV/idiv_large.ll @@ -1,4 +1,3 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 < %s | FileCheck %s diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll index abb929eaaf6e6..e256ba9dd5997 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s 
| FileCheck %s --check-prefixes=CHECK,ZVFH ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH -; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN +; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN define void @fcmp_oeq_vv_v8f16(ptr %x, ptr %y, ptr %z) { ; ZVFH-LABEL: fcmp_oeq_vv_v8f16: @@ -437,6 +437,1036 @@ define void @fcmp_ugt_vv_v64f16(ptr %x, ptr %y, ptr %z) { ; ZVFH-NEXT: vmnot.m v8, v24 ; ZVFH-NEXT: vsm.v v8, (a2) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_vv_v64f16: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 512 +; RV32ZVFHMIN-NEXT: sw ra, 508(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 504(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a3, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a3, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v16, (a1) +; RV32ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV32ZVFHMIN-NEXT: addi a0, sp, 128 +; RV32ZVFHMIN-NEXT: addi a1, sp, 256 +; RV32ZVFHMIN-NEXT: vse16.v v16, (a0) +; RV32ZVFHMIN-NEXT: vse16.v v8, (a1) +; RV32ZVFHMIN-NEXT: lh a0, 192(sp) +; RV32ZVFHMIN-NEXT: lh a1, 320(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; 
RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 190(sp) +; RV32ZVFHMIN-NEXT: lh a1, 318(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 188(sp) +; RV32ZVFHMIN-NEXT: lh a1, 316(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 186(sp) +; RV32ZVFHMIN-NEXT: lh a1, 314(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 184(sp) +; RV32ZVFHMIN-NEXT: lh a1, 312(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 182(sp) +; RV32ZVFHMIN-NEXT: lh a1, 310(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 180(sp) +; RV32ZVFHMIN-NEXT: lh a1, 308(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 178(sp) +; RV32ZVFHMIN-NEXT: lh a1, 306(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a1, 176(sp) +; RV32ZVFHMIN-NEXT: lh a4, 304(sp) +; RV32ZVFHMIN-NEXT: 
vmv.x.s a0, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a1, a1, 1 +; RV32ZVFHMIN-NEXT: sb a1, 88(sp) +; RV32ZVFHMIN-NEXT: lh a4, 174(sp) +; RV32ZVFHMIN-NEXT: lh a5, 302(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v16, 7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 87(sp) +; RV32ZVFHMIN-NEXT: lh a4, 172(sp) +; RV32ZVFHMIN-NEXT: lh a5, 300(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 7 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v16, 6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 86(sp) +; RV32ZVFHMIN-NEXT: lh a4, 170(sp) +; RV32ZVFHMIN-NEXT: lh a5, 298(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 6 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v16, 5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 85(sp) +; RV32ZVFHMIN-NEXT: lh a4, 168(sp) +; RV32ZVFHMIN-NEXT: lh a5, 296(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v19, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v16, 4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 84(sp) +; RV32ZVFHMIN-NEXT: lh a4, 166(sp) +; RV32ZVFHMIN-NEXT: lh a5, 294(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v21, v8, 4 +; RV32ZVFHMIN-NEXT: vslidedown.vi v23, v16, 3 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 83(sp) +; 
RV32ZVFHMIN-NEXT: lh a4, 164(sp) +; RV32ZVFHMIN-NEXT: lh a5, 292(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 3 +; RV32ZVFHMIN-NEXT: vslidedown.vi v25, v16, 2 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 82(sp) +; RV32ZVFHMIN-NEXT: lh a4, 162(sp) +; RV32ZVFHMIN-NEXT: lh a5, 290(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v24, v8, 2 +; RV32ZVFHMIN-NEXT: vslidedown.vi v26, v16, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: xori a0, a4, 1 +; RV32ZVFHMIN-NEXT: sb a0, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 160(sp) +; RV32ZVFHMIN-NEXT: lh a1, 288(sp) +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a4, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 226(sp) +; RV32ZVFHMIN-NEXT: lh a1, 354(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v27, v8, 1 +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v16, 15 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a4, 224(sp) +; RV32ZVFHMIN-NEXT: lh a5, 352(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a1, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a0, v13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 112(sp) +; RV32ZVFHMIN-NEXT: lh a4, 222(sp) +; RV32ZVFHMIN-NEXT: lh a6, 350(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 15 +; 
RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 111(sp) +; RV32ZVFHMIN-NEXT: lh a4, 220(sp) +; RV32ZVFHMIN-NEXT: lh a6, 348(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v16, 14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 110(sp) +; RV32ZVFHMIN-NEXT: lh t0, 218(sp) +; RV32ZVFHMIN-NEXT: lh t1, 346(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v19 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori t0, t0, 1 +; RV32ZVFHMIN-NEXT: sb t0, 109(sp) +; RV32ZVFHMIN-NEXT: lh t0, 216(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v16, 13 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v20 +; RV32ZVFHMIN-NEXT: lh t2, 344(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v21 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v16, 12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV32ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: xori a1, t2, 1 +; RV32ZVFHMIN-NEXT: sb a1, 108(sp) +; RV32ZVFHMIN-NEXT: lh a1, 214(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a0 +; RV32ZVFHMIN-NEXT: lh t3, 342(sp) +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v23 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: xori a1, a1, 1 +; RV32ZVFHMIN-NEXT: sb a1, 107(sp) +; RV32ZVFHMIN-NEXT: lh a5, 212(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh a7, 340(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s t3, v22 +; 
RV32ZVFHMIN-NEXT: vslidedown.vi v22, v16, 11 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: sb a5, 106(sp) +; RV32ZVFHMIN-NEXT: lh a5, 210(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: lh a6, 338(sp) +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v25 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: sb a5, 105(sp) +; RV32ZVFHMIN-NEXT: lh a6, 208(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh t0, 336(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV32ZVFHMIN-NEXT: vslidedown.vi v24, v16, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: xori t0, t0, 1 +; RV32ZVFHMIN-NEXT: sb t0, 104(sp) +; RV32ZVFHMIN-NEXT: lh t0, 206(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: lh t1, 334(sp) +; RV32ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v26 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: xori a7, t1, 1 +; RV32ZVFHMIN-NEXT: sb a7, 103(sp) +; RV32ZVFHMIN-NEXT: lh a7, 204(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a6, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh t1, 332(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v27 +; RV32ZVFHMIN-NEXT: vslidedown.vi v26, v16, 9 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: xori a7, t1, 1 +; RV32ZVFHMIN-NEXT: sb a7, 102(sp) 
+; RV32ZVFHMIN-NEXT: lh a7, 202(sp) +; RV32ZVFHMIN-NEXT: lh t0, 330(sp) +; RV32ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: sb a7, 101(sp) +; RV32ZVFHMIN-NEXT: lh a7, 200(sp) +; RV32ZVFHMIN-NEXT: lh t0, 328(sp) +; RV32ZVFHMIN-NEXT: xori a1, a1, 1 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: sb a7, 100(sp) +; RV32ZVFHMIN-NEXT: lh a7, 198(sp) +; RV32ZVFHMIN-NEXT: lh t0, 326(sp) +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: xori t2, t2, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: sb a7, 99(sp) +; RV32ZVFHMIN-NEXT: lh a7, 196(sp) +; RV32ZVFHMIN-NEXT: lh t0, 324(sp) +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: xori t1, t1, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: sb a7, 98(sp) +; RV32ZVFHMIN-NEXT: lh a7, 194(sp) +; RV32ZVFHMIN-NEXT: lh t0, 322(sp) +; RV32ZVFHMIN-NEXT: sb t1, 65(sp) +; RV32ZVFHMIN-NEXT: sb a6, 66(sp) +; RV32ZVFHMIN-NEXT: sb t2, 67(sp) +; RV32ZVFHMIN-NEXT: sb a5, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: sb a4, 69(sp) +; RV32ZVFHMIN-NEXT: sb a1, 70(sp) +; RV32ZVFHMIN-NEXT: sb a0, 71(sp) +; RV32ZVFHMIN-NEXT: sb a5, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 254(sp) +; RV32ZVFHMIN-NEXT: lh a1, 382(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v16, 8 +; RV32ZVFHMIN-NEXT: vslidedown.vi v2, v8, 14 +; 
RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 252(sp) +; RV32ZVFHMIN-NEXT: lh a1, 380(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v0, v8, 13 +; RV32ZVFHMIN-NEXT: vslidedown.vi v4, v8, 12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 250(sp) +; RV32ZVFHMIN-NEXT: lh a1, 378(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v6, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v30, v8, 10 +; RV32ZVFHMIN-NEXT: vslidedown.vi v28, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 248(sp) +; RV32ZVFHMIN-NEXT: lh a1, 376(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 246(sp) +; RV32ZVFHMIN-NEXT: lh a1, 374(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v2 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v18 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 244(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v0 +; RV32ZVFHMIN-NEXT: lh a1, 372(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v20 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: vmv.x.s t3, v4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x 
fa5, a4 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a1, 242(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: lh a4, 370(sp) +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: xori a1, a1, 1 +; RV32ZVFHMIN-NEXT: sb a1, 121(sp) +; RV32ZVFHMIN-NEXT: lh a4, 240(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: lh a6, 368(sp) +; RV32ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: sb a4, 120(sp) +; RV32ZVFHMIN-NEXT: lh a6, 238(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: lh t0, 366(sp) +; RV32ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: xori t0, t0, 1 +; RV32ZVFHMIN-NEXT: sb t0, 119(sp) +; RV32ZVFHMIN-NEXT: lh t0, 236(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: lh t1, 364(sp) +; RV32ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v30 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: xori a5, t1, 1 +; RV32ZVFHMIN-NEXT: sb a5, 118(sp) +; RV32ZVFHMIN-NEXT: lh a5, 234(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: lh a7, 362(sp) +; RV32ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v26 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: 
fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: xori a6, a7, 1 +; RV32ZVFHMIN-NEXT: sb a6, 117(sp) +; RV32ZVFHMIN-NEXT: lh a6, 232(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: lh a7, 360(sp) +; RV32ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v28 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: xori a5, a7, 1 +; RV32ZVFHMIN-NEXT: sb a5, 116(sp) +; RV32ZVFHMIN-NEXT: lh a5, 230(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: lh a6, 358(sp) +; RV32ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: fle.h a6, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: xori a1, a1, 1 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: xori a5, t2, 1 +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: sb a6, 115(sp) +; RV32ZVFHMIN-NEXT: lh a6, 228(sp) +; RV32ZVFHMIN-NEXT: lh t2, 356(sp) +; RV32ZVFHMIN-NEXT: sb a5, 76(sp) +; RV32ZVFHMIN-NEXT: sb a4, 77(sp) +; RV32ZVFHMIN-NEXT: sb a1, 78(sp) +; RV32ZVFHMIN-NEXT: sb a0, 79(sp) +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a1, t1, 1 +; RV32ZVFHMIN-NEXT: xori a4, t0, 1 +; RV32ZVFHMIN-NEXT: xori a5, a7, 1 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 72(sp) +; RV32ZVFHMIN-NEXT: sb a5, 73(sp) +; RV32ZVFHMIN-NEXT: sb a4, 74(sp) +; RV32ZVFHMIN-NEXT: sb a1, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV32ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a3, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: 
vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a2) +; RV32ZVFHMIN-NEXT: addi sp, s0, -512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 512 +; RV32ZVFHMIN-NEXT: lw ra, 508(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 504(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_vv_v64f16: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 512 +; RV64ZVFHMIN-NEXT: sd ra, 504(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 496(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a3, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a3, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v16, (a1) +; RV64ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV64ZVFHMIN-NEXT: addi a0, sp, 128 +; RV64ZVFHMIN-NEXT: addi a1, sp, 256 +; RV64ZVFHMIN-NEXT: vse16.v v16, (a0) +; RV64ZVFHMIN-NEXT: vse16.v v8, (a1) +; RV64ZVFHMIN-NEXT: lh a0, 192(sp) +; RV64ZVFHMIN-NEXT: lh a1, 320(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 190(sp) +; RV64ZVFHMIN-NEXT: lh a1, 318(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 188(sp) +; RV64ZVFHMIN-NEXT: lh a1, 316(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; 
RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 186(sp) +; RV64ZVFHMIN-NEXT: lh a1, 314(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 184(sp) +; RV64ZVFHMIN-NEXT: lh a1, 312(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 182(sp) +; RV64ZVFHMIN-NEXT: lh a1, 310(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 180(sp) +; RV64ZVFHMIN-NEXT: lh a1, 308(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 178(sp) +; RV64ZVFHMIN-NEXT: lh a1, 306(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a1, 176(sp) +; RV64ZVFHMIN-NEXT: lh a4, 304(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a0, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a1, a1, 1 +; RV64ZVFHMIN-NEXT: sb a1, 88(sp) +; RV64ZVFHMIN-NEXT: lh a4, 174(sp) +; RV64ZVFHMIN-NEXT: lh a5, 302(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v16, 7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori 
a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 87(sp) +; RV64ZVFHMIN-NEXT: lh a4, 172(sp) +; RV64ZVFHMIN-NEXT: lh a5, 300(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 7 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v16, 6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 86(sp) +; RV64ZVFHMIN-NEXT: lh a4, 170(sp) +; RV64ZVFHMIN-NEXT: lh a5, 298(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 6 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v16, 5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 85(sp) +; RV64ZVFHMIN-NEXT: lh a4, 168(sp) +; RV64ZVFHMIN-NEXT: lh a5, 296(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v19, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v16, 4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 84(sp) +; RV64ZVFHMIN-NEXT: lh a4, 166(sp) +; RV64ZVFHMIN-NEXT: lh a5, 294(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v21, v8, 4 +; RV64ZVFHMIN-NEXT: vslidedown.vi v23, v16, 3 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 83(sp) +; RV64ZVFHMIN-NEXT: lh a4, 164(sp) +; RV64ZVFHMIN-NEXT: lh a5, 292(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vi v25, v16, 2 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 82(sp) +; RV64ZVFHMIN-NEXT: lh a4, 162(sp) +; RV64ZVFHMIN-NEXT: lh a5, 290(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v24, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v26, v16, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; 
RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: xori a0, a4, 1 +; RV64ZVFHMIN-NEXT: sb a0, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 160(sp) +; RV64ZVFHMIN-NEXT: lh a1, 288(sp) +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a4, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 226(sp) +; RV64ZVFHMIN-NEXT: lh a1, 354(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v27, v8, 1 +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v16, 15 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a4, 224(sp) +; RV64ZVFHMIN-NEXT: lh a5, 352(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a1, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a0, v13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 112(sp) +; RV64ZVFHMIN-NEXT: lh a4, 222(sp) +; RV64ZVFHMIN-NEXT: lh a6, 350(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 15 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 111(sp) +; RV64ZVFHMIN-NEXT: lh a4, 220(sp) +; RV64ZVFHMIN-NEXT: lh a6, 348(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v16, 14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb 
a4, 110(sp) +; RV64ZVFHMIN-NEXT: lh t0, 218(sp) +; RV64ZVFHMIN-NEXT: lh t1, 346(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v19 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori t0, t0, 1 +; RV64ZVFHMIN-NEXT: sb t0, 109(sp) +; RV64ZVFHMIN-NEXT: lh t0, 216(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v16, 13 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v20 +; RV64ZVFHMIN-NEXT: lh t2, 344(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v21 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v16, 12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV64ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: xori a1, t2, 1 +; RV64ZVFHMIN-NEXT: sb a1, 108(sp) +; RV64ZVFHMIN-NEXT: lh a1, 214(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a0 +; RV64ZVFHMIN-NEXT: lh t3, 342(sp) +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v23 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: xori a1, a1, 1 +; RV64ZVFHMIN-NEXT: sb a1, 107(sp) +; RV64ZVFHMIN-NEXT: lh a5, 212(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh a7, 340(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s t3, v22 +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v16, 11 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: sb a5, 106(sp) +; RV64ZVFHMIN-NEXT: lh a5, 210(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: lh a6, 338(sp) +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v25 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: 
fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: sb a5, 105(sp) +; RV64ZVFHMIN-NEXT: lh a6, 208(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh t0, 336(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV64ZVFHMIN-NEXT: vslidedown.vi v24, v16, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: xori t0, t0, 1 +; RV64ZVFHMIN-NEXT: sb t0, 104(sp) +; RV64ZVFHMIN-NEXT: lh t0, 206(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: lh t1, 334(sp) +; RV64ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v26 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: xori a7, t1, 1 +; RV64ZVFHMIN-NEXT: sb a7, 103(sp) +; RV64ZVFHMIN-NEXT: lh a7, 204(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a6, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh t1, 332(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v27 +; RV64ZVFHMIN-NEXT: vslidedown.vi v26, v16, 9 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: xori a7, t1, 1 +; RV64ZVFHMIN-NEXT: sb a7, 102(sp) +; RV64ZVFHMIN-NEXT: lh a7, 202(sp) +; RV64ZVFHMIN-NEXT: lh t0, 330(sp) +; RV64ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: sb a7, 101(sp) +; RV64ZVFHMIN-NEXT: lh a7, 200(sp) +; RV64ZVFHMIN-NEXT: lh t0, 328(sp) +; RV64ZVFHMIN-NEXT: xori a1, a1, 1 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 
+; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: sb a7, 100(sp) +; RV64ZVFHMIN-NEXT: lh a7, 198(sp) +; RV64ZVFHMIN-NEXT: lh t0, 326(sp) +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: xori t2, t2, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: sb a7, 99(sp) +; RV64ZVFHMIN-NEXT: lh a7, 196(sp) +; RV64ZVFHMIN-NEXT: lh t0, 324(sp) +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: xori t1, t1, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: sb a7, 98(sp) +; RV64ZVFHMIN-NEXT: lh a7, 194(sp) +; RV64ZVFHMIN-NEXT: lh t0, 322(sp) +; RV64ZVFHMIN-NEXT: sb t1, 65(sp) +; RV64ZVFHMIN-NEXT: sb a6, 66(sp) +; RV64ZVFHMIN-NEXT: sb t2, 67(sp) +; RV64ZVFHMIN-NEXT: sb a5, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: sb a4, 69(sp) +; RV64ZVFHMIN-NEXT: sb a1, 70(sp) +; RV64ZVFHMIN-NEXT: sb a0, 71(sp) +; RV64ZVFHMIN-NEXT: sb a5, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 254(sp) +; RV64ZVFHMIN-NEXT: lh a1, 382(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v16, 8 +; RV64ZVFHMIN-NEXT: vslidedown.vi v2, v8, 14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 252(sp) +; RV64ZVFHMIN-NEXT: lh a1, 380(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v0, v8, 13 +; RV64ZVFHMIN-NEXT: vslidedown.vi v4, v8, 12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh 
a0, 250(sp) +; RV64ZVFHMIN-NEXT: lh a1, 378(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v6, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v30, v8, 10 +; RV64ZVFHMIN-NEXT: vslidedown.vi v28, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 248(sp) +; RV64ZVFHMIN-NEXT: lh a1, 376(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 246(sp) +; RV64ZVFHMIN-NEXT: lh a1, 374(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v2 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v18 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 244(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v0 +; RV64ZVFHMIN-NEXT: lh a1, 372(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v20 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmv.x.s t3, v4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a1, 242(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: lh a4, 370(sp) +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: xori a1, a1, 1 +; RV64ZVFHMIN-NEXT: sb a1, 121(sp) +; RV64ZVFHMIN-NEXT: lh a4, 240(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x 
fa4, a7 +; RV64ZVFHMIN-NEXT: lh a6, 368(sp) +; RV64ZVFHMIN-NEXT: fle.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: sb a4, 120(sp) +; RV64ZVFHMIN-NEXT: lh a6, 238(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: lh t0, 366(sp) +; RV64ZVFHMIN-NEXT: fle.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: xori t0, t0, 1 +; RV64ZVFHMIN-NEXT: sb t0, 119(sp) +; RV64ZVFHMIN-NEXT: lh t0, 236(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: lh t1, 364(sp) +; RV64ZVFHMIN-NEXT: fle.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v30 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: xori a5, t1, 1 +; RV64ZVFHMIN-NEXT: sb a5, 118(sp) +; RV64ZVFHMIN-NEXT: lh a5, 234(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: lh a7, 362(sp) +; RV64ZVFHMIN-NEXT: fle.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v26 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: xori a6, a7, 1 +; RV64ZVFHMIN-NEXT: sb a6, 117(sp) +; RV64ZVFHMIN-NEXT: lh a6, 232(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: lh a7, 360(sp) +; RV64ZVFHMIN-NEXT: fle.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v28 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: xori a5, a7, 1 +; RV64ZVFHMIN-NEXT: sb a5, 116(sp) +; RV64ZVFHMIN-NEXT: 
lh a5, 230(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: lh a6, 358(sp) +; RV64ZVFHMIN-NEXT: fle.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: fle.h a6, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: xori a1, a1, 1 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: xori a5, t2, 1 +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: sb a6, 115(sp) +; RV64ZVFHMIN-NEXT: lh a6, 228(sp) +; RV64ZVFHMIN-NEXT: lh t2, 356(sp) +; RV64ZVFHMIN-NEXT: sb a5, 76(sp) +; RV64ZVFHMIN-NEXT: sb a4, 77(sp) +; RV64ZVFHMIN-NEXT: sb a1, 78(sp) +; RV64ZVFHMIN-NEXT: sb a0, 79(sp) +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a1, t1, 1 +; RV64ZVFHMIN-NEXT: xori a4, t0, 1 +; RV64ZVFHMIN-NEXT: xori a5, a7, 1 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 72(sp) +; RV64ZVFHMIN-NEXT: sb a5, 73(sp) +; RV64ZVFHMIN-NEXT: sb a4, 74(sp) +; RV64ZVFHMIN-NEXT: sb a1, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV64ZVFHMIN-NEXT: fle.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a3, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a2) +; RV64ZVFHMIN-NEXT: addi sp, s0, -512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 512 +; RV64ZVFHMIN-NEXT: ld ra, 504(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 496(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = load <64 x 
half>, ptr %y %c = fcmp ugt <64 x half> %a, %b @@ -454,6 +1484,908 @@ define void @fcmp_ugt_vv_v64f16_nonans(ptr %x, ptr %y, ptr %z) { ; ZVFH-NEXT: vmflt.vv v24, v16, v8 ; ZVFH-NEXT: vsm.v v24, (a2) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_vv_v64f16_nonans: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 512 +; RV32ZVFHMIN-NEXT: sw ra, 508(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 504(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a3, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a3, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v16, (a0) +; RV32ZVFHMIN-NEXT: vle16.v v8, (a1) +; RV32ZVFHMIN-NEXT: addi a0, sp, 256 +; RV32ZVFHMIN-NEXT: addi a1, sp, 128 +; RV32ZVFHMIN-NEXT: vse16.v v16, (a0) +; RV32ZVFHMIN-NEXT: vse16.v v8, (a1) +; RV32ZVFHMIN-NEXT: lh a0, 320(sp) +; RV32ZVFHMIN-NEXT: lh a1, 192(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 318(sp) +; RV32ZVFHMIN-NEXT: lh a1, 190(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 316(sp) +; RV32ZVFHMIN-NEXT: lh a1, 188(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 314(sp) +; RV32ZVFHMIN-NEXT: lh a1, 186(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 312(sp) +; RV32ZVFHMIN-NEXT: lh a1, 184(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 
+; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 310(sp) +; RV32ZVFHMIN-NEXT: lh a1, 182(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 308(sp) +; RV32ZVFHMIN-NEXT: lh a1, 180(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 306(sp) +; RV32ZVFHMIN-NEXT: lh a1, 178(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a0, 304(sp) +; RV32ZVFHMIN-NEXT: lh a1, 176(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 88(sp) +; RV32ZVFHMIN-NEXT: lh a0, 302(sp) +; RV32ZVFHMIN-NEXT: lh a1, 174(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 87(sp) +; RV32ZVFHMIN-NEXT: lh a0, 300(sp) +; RV32ZVFHMIN-NEXT: lh a1, 172(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 86(sp) +; RV32ZVFHMIN-NEXT: lh a1, 298(sp) +; RV32ZVFHMIN-NEXT: lh a4, 170(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a0, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a1, 85(sp) +; RV32ZVFHMIN-NEXT: lh a4, 296(sp) +; RV32ZVFHMIN-NEXT: lh a5, 168(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v16, 7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; 
RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 84(sp) +; RV32ZVFHMIN-NEXT: lh a4, 294(sp) +; RV32ZVFHMIN-NEXT: lh a5, 166(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 7 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v16, 6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 83(sp) +; RV32ZVFHMIN-NEXT: lh a4, 292(sp) +; RV32ZVFHMIN-NEXT: lh a5, 164(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 6 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v16, 5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 82(sp) +; RV32ZVFHMIN-NEXT: lh a4, 290(sp) +; RV32ZVFHMIN-NEXT: lh a5, 162(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v19, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v16, 4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: sb a4, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 288(sp) +; RV32ZVFHMIN-NEXT: lh a4, 160(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a1, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 354(sp) +; RV32ZVFHMIN-NEXT: lh a1, 226(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v21, v8, 4 +; RV32ZVFHMIN-NEXT: vslidedown.vi v23, v16, 3 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a0, 352(sp) +; RV32ZVFHMIN-NEXT: lh a1, 224(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 3 +; RV32ZVFHMIN-NEXT: vslidedown.vi v25, v16, 2 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: 
sb a0, 112(sp) +; RV32ZVFHMIN-NEXT: lh a0, 350(sp) +; RV32ZVFHMIN-NEXT: lh a1, 222(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v24, v8, 2 +; RV32ZVFHMIN-NEXT: vslidedown.vi v27, v16, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 111(sp) +; RV32ZVFHMIN-NEXT: lh a0, 348(sp) +; RV32ZVFHMIN-NEXT: lh a1, 220(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v26, v8, 1 +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v16, 15 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 110(sp) +; RV32ZVFHMIN-NEXT: lh a4, 346(sp) +; RV32ZVFHMIN-NEXT: lh a5, 218(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a1, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a0, v13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 109(sp) +; RV32ZVFHMIN-NEXT: lh a4, 344(sp) +; RV32ZVFHMIN-NEXT: lh a6, 216(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 15 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 108(sp) +; RV32ZVFHMIN-NEXT: lh a4, 342(sp) +; RV32ZVFHMIN-NEXT: lh a6, 214(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v16, 14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 107(sp) +; RV32ZVFHMIN-NEXT: lh t0, 340(sp) +; RV32ZVFHMIN-NEXT: lh t1, 212(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v19 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb t0, 106(sp) +; RV32ZVFHMIN-NEXT: lh t1, 338(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v16, 13 +; 
RV32ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV32ZVFHMIN-NEXT: lh t2, 210(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v21 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v16, 12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV32ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: sb t2, 105(sp) +; RV32ZVFHMIN-NEXT: lh a1, 336(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a0 +; RV32ZVFHMIN-NEXT: lh t3, 208(sp) +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v23 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: sb a1, 104(sp) +; RV32ZVFHMIN-NEXT: lh a5, 334(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh a7, 206(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s t3, v22 +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v16, 11 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a5, 103(sp) +; RV32ZVFHMIN-NEXT: lh a5, 332(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: lh a6, 204(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v25 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a5, 102(sp) +; RV32ZVFHMIN-NEXT: lh a6, 330(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh t0, 202(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV32ZVFHMIN-NEXT: vslidedown.vi v24, v16, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: sb t0, 101(sp) +; RV32ZVFHMIN-NEXT: lh t0, 328(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, 
t3 +; RV32ZVFHMIN-NEXT: lh t1, 200(sp) +; RV32ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v27 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: sb t1, 100(sp) +; RV32ZVFHMIN-NEXT: lh a7, 326(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a6, fa4, fa5 +; RV32ZVFHMIN-NEXT: lh t1, 198(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v26 +; RV32ZVFHMIN-NEXT: vslidedown.vi v26, v16, 9 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb t1, 99(sp) +; RV32ZVFHMIN-NEXT: lh t0, 324(sp) +; RV32ZVFHMIN-NEXT: lh t1, 196(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb t0, 98(sp) +; RV32ZVFHMIN-NEXT: lh t0, 322(sp) +; RV32ZVFHMIN-NEXT: lh t1, 194(sp) +; RV32ZVFHMIN-NEXT: sb a7, 65(sp) +; RV32ZVFHMIN-NEXT: sb a6, 66(sp) +; RV32ZVFHMIN-NEXT: sb t2, 67(sp) +; RV32ZVFHMIN-NEXT: sb a5, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a4, 69(sp) +; RV32ZVFHMIN-NEXT: sb a1, 70(sp) +; RV32ZVFHMIN-NEXT: sb a0, 71(sp) +; RV32ZVFHMIN-NEXT: sb a5, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 382(sp) +; RV32ZVFHMIN-NEXT: lh a1, 254(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v16, 8 +; RV32ZVFHMIN-NEXT: vslidedown.vi v2, v8, 14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 380(sp) +; RV32ZVFHMIN-NEXT: lh a1, 252(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v0, v8, 13 +; RV32ZVFHMIN-NEXT: vslidedown.vi v4, v8, 12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, 
a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 378(sp) +; RV32ZVFHMIN-NEXT: lh a1, 250(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v6, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v30, v8, 10 +; RV32ZVFHMIN-NEXT: vslidedown.vi v28, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 376(sp) +; RV32ZVFHMIN-NEXT: lh a1, 248(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v14 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 374(sp) +; RV32ZVFHMIN-NEXT: lh a1, 246(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v2 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v18 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 372(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v0 +; RV32ZVFHMIN-NEXT: lh a1, 244(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v20 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: vmv.x.s t3, v4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a1, 370(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: lh a4, 242(sp) +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a1, 121(sp) +; RV32ZVFHMIN-NEXT: lh a4, 368(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: lh a6, 240(sp) 
+; RV32ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a4, 120(sp) +; RV32ZVFHMIN-NEXT: lh a6, 366(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: lh t0, 238(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: sb t0, 119(sp) +; RV32ZVFHMIN-NEXT: lh t0, 364(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV32ZVFHMIN-NEXT: lh t1, 236(sp) +; RV32ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v30 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: sb t1, 118(sp) +; RV32ZVFHMIN-NEXT: lh a5, 362(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: lh a7, 234(sp) +; RV32ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v26 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a7, 117(sp) +; RV32ZVFHMIN-NEXT: lh a6, 360(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV32ZVFHMIN-NEXT: lh a7, 232(sp) +; RV32ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v28 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: sb a7, 116(sp) +; RV32ZVFHMIN-NEXT: lh a5, 358(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: lh a6, 230(sp) +; RV32ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v16 +; RV32ZVFHMIN-NEXT: 
fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a6, fa4, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV32ZVFHMIN-NEXT: sb a6, 115(sp) +; RV32ZVFHMIN-NEXT: lh a5, 356(sp) +; RV32ZVFHMIN-NEXT: lh a6, 228(sp) +; RV32ZVFHMIN-NEXT: sb t2, 76(sp) +; RV32ZVFHMIN-NEXT: sb a4, 77(sp) +; RV32ZVFHMIN-NEXT: sb a1, 78(sp) +; RV32ZVFHMIN-NEXT: sb a0, 79(sp) +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 72(sp) +; RV32ZVFHMIN-NEXT: sb a7, 73(sp) +; RV32ZVFHMIN-NEXT: sb t0, 74(sp) +; RV32ZVFHMIN-NEXT: sb t1, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV32ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a3, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a2) +; RV32ZVFHMIN-NEXT: addi sp, s0, -512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 512 +; RV32ZVFHMIN-NEXT: lw ra, 508(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 504(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 512 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_vv_v64f16_nonans: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 512 +; RV64ZVFHMIN-NEXT: sd ra, 504(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 496(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a3, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a3, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v16, (a0) +; RV64ZVFHMIN-NEXT: vle16.v 
v8, (a1) +; RV64ZVFHMIN-NEXT: addi a0, sp, 256 +; RV64ZVFHMIN-NEXT: addi a1, sp, 128 +; RV64ZVFHMIN-NEXT: vse16.v v16, (a0) +; RV64ZVFHMIN-NEXT: vse16.v v8, (a1) +; RV64ZVFHMIN-NEXT: lh a0, 320(sp) +; RV64ZVFHMIN-NEXT: lh a1, 192(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 318(sp) +; RV64ZVFHMIN-NEXT: lh a1, 190(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 316(sp) +; RV64ZVFHMIN-NEXT: lh a1, 188(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 314(sp) +; RV64ZVFHMIN-NEXT: lh a1, 186(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 312(sp) +; RV64ZVFHMIN-NEXT: lh a1, 184(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 310(sp) +; RV64ZVFHMIN-NEXT: lh a1, 182(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 308(sp) +; RV64ZVFHMIN-NEXT: lh a1, 180(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 306(sp) +; RV64ZVFHMIN-NEXT: lh a1, 178(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a0, 304(sp) +; RV64ZVFHMIN-NEXT: lh a1, 
176(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 88(sp) +; RV64ZVFHMIN-NEXT: lh a0, 302(sp) +; RV64ZVFHMIN-NEXT: lh a1, 174(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 87(sp) +; RV64ZVFHMIN-NEXT: lh a0, 300(sp) +; RV64ZVFHMIN-NEXT: lh a1, 172(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 86(sp) +; RV64ZVFHMIN-NEXT: lh a1, 298(sp) +; RV64ZVFHMIN-NEXT: lh a4, 170(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a0, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a1, 85(sp) +; RV64ZVFHMIN-NEXT: lh a4, 296(sp) +; RV64ZVFHMIN-NEXT: lh a5, 168(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a1, v8 +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v16, 7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 84(sp) +; RV64ZVFHMIN-NEXT: lh a4, 294(sp) +; RV64ZVFHMIN-NEXT: lh a5, 166(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 7 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v16, 6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 83(sp) +; RV64ZVFHMIN-NEXT: lh a4, 292(sp) +; RV64ZVFHMIN-NEXT: lh a5, 164(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 6 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v16, 5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 82(sp) +; RV64ZVFHMIN-NEXT: lh a4, 290(sp) +; RV64ZVFHMIN-NEXT: lh a5, 162(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v19, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi 
v20, v16, 4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: sb a4, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 288(sp) +; RV64ZVFHMIN-NEXT: lh a4, 160(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a1, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 354(sp) +; RV64ZVFHMIN-NEXT: lh a1, 226(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v21, v8, 4 +; RV64ZVFHMIN-NEXT: vslidedown.vi v23, v16, 3 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a0, 352(sp) +; RV64ZVFHMIN-NEXT: lh a1, 224(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vi v25, v16, 2 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 112(sp) +; RV64ZVFHMIN-NEXT: lh a0, 350(sp) +; RV64ZVFHMIN-NEXT: lh a1, 222(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v24, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v27, v16, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 111(sp) +; RV64ZVFHMIN-NEXT: lh a0, 348(sp) +; RV64ZVFHMIN-NEXT: lh a1, 220(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v26, v8, 1 +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v16, 15 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 110(sp) +; RV64ZVFHMIN-NEXT: lh a4, 346(sp) +; RV64ZVFHMIN-NEXT: lh a5, 218(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a1, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a0, v13 +; 
RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 109(sp) +; RV64ZVFHMIN-NEXT: lh a4, 344(sp) +; RV64ZVFHMIN-NEXT: lh a6, 216(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 15 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 108(sp) +; RV64ZVFHMIN-NEXT: lh a4, 342(sp) +; RV64ZVFHMIN-NEXT: lh a6, 214(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v16, 14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 107(sp) +; RV64ZVFHMIN-NEXT: lh t0, 340(sp) +; RV64ZVFHMIN-NEXT: lh t1, 212(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v19 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb t0, 106(sp) +; RV64ZVFHMIN-NEXT: lh t1, 338(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v16, 13 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV64ZVFHMIN-NEXT: lh t2, 210(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v21 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v16, 12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t2 +; RV64ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: sb t2, 105(sp) +; RV64ZVFHMIN-NEXT: lh a1, 336(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a0 +; RV64ZVFHMIN-NEXT: lh t3, 208(sp) +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v23 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: sb a1, 104(sp) +; RV64ZVFHMIN-NEXT: lh a5, 334(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh a7, 
206(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s t3, v22 +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v16, 11 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a5, 103(sp) +; RV64ZVFHMIN-NEXT: lh a5, 332(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: lh a6, 204(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v25 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a5, 102(sp) +; RV64ZVFHMIN-NEXT: lh a6, 330(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh t0, 202(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV64ZVFHMIN-NEXT: vslidedown.vi v24, v16, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: sb t0, 101(sp) +; RV64ZVFHMIN-NEXT: lh t0, 328(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: lh t1, 200(sp) +; RV64ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v27 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: sb t1, 100(sp) +; RV64ZVFHMIN-NEXT: lh a7, 326(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a6, fa4, fa5 +; RV64ZVFHMIN-NEXT: lh t1, 198(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v26 +; RV64ZVFHMIN-NEXT: vslidedown.vi v26, v16, 9 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb t1, 99(sp) +; RV64ZVFHMIN-NEXT: lh t0, 324(sp) +; RV64ZVFHMIN-NEXT: lh t1, 196(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: 
flt.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb t0, 98(sp) +; RV64ZVFHMIN-NEXT: lh t0, 322(sp) +; RV64ZVFHMIN-NEXT: lh t1, 194(sp) +; RV64ZVFHMIN-NEXT: sb a7, 65(sp) +; RV64ZVFHMIN-NEXT: sb a6, 66(sp) +; RV64ZVFHMIN-NEXT: sb t2, 67(sp) +; RV64ZVFHMIN-NEXT: sb a5, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h a5, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a4, 69(sp) +; RV64ZVFHMIN-NEXT: sb a1, 70(sp) +; RV64ZVFHMIN-NEXT: sb a0, 71(sp) +; RV64ZVFHMIN-NEXT: sb a5, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 382(sp) +; RV64ZVFHMIN-NEXT: lh a1, 254(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v16, 8 +; RV64ZVFHMIN-NEXT: vslidedown.vi v2, v8, 14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 380(sp) +; RV64ZVFHMIN-NEXT: lh a1, 252(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v0, v8, 13 +; RV64ZVFHMIN-NEXT: vslidedown.vi v4, v8, 12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 378(sp) +; RV64ZVFHMIN-NEXT: lh a1, 250(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v6, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v30, v8, 10 +; RV64ZVFHMIN-NEXT: vslidedown.vi v28, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 376(sp) +; RV64ZVFHMIN-NEXT: lh a1, 248(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v14 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 
124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 374(sp) +; RV64ZVFHMIN-NEXT: lh a1, 246(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v2 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v18 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 372(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v0 +; RV64ZVFHMIN-NEXT: lh a1, 244(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v20 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: vmv.x.s t3, v4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a1 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a1, 370(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: lh a4, 242(sp) +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a1 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a4 +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a1, 121(sp) +; RV64ZVFHMIN-NEXT: lh a4, 368(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: lh a6, 240(sp) +; RV64ZVFHMIN-NEXT: flt.h a1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a4, 120(sp) +; RV64ZVFHMIN-NEXT: lh a6, 366(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: lh t0, 238(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v24 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: sb t0, 119(sp) +; RV64ZVFHMIN-NEXT: lh t0, 364(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t3 +; RV64ZVFHMIN-NEXT: lh t1, 236(sp) +; RV64ZVFHMIN-NEXT: flt.h t2, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: 
vmv.x.s t0, v30 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: sb t1, 118(sp) +; RV64ZVFHMIN-NEXT: lh a5, 362(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: lh a7, 234(sp) +; RV64ZVFHMIN-NEXT: flt.h t1, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v26 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a7, 117(sp) +; RV64ZVFHMIN-NEXT: lh a6, 360(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, t0 +; RV64ZVFHMIN-NEXT: lh a7, 232(sp) +; RV64ZVFHMIN-NEXT: flt.h t0, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v28 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: sb a7, 116(sp) +; RV64ZVFHMIN-NEXT: lh a5, 358(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: lh a6, 230(sp) +; RV64ZVFHMIN-NEXT: flt.h a7, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a6, fa4, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a5 +; RV64ZVFHMIN-NEXT: sb a6, 115(sp) +; RV64ZVFHMIN-NEXT: lh a5, 356(sp) +; RV64ZVFHMIN-NEXT: lh a6, 228(sp) +; RV64ZVFHMIN-NEXT: sb t2, 76(sp) +; RV64ZVFHMIN-NEXT: sb a4, 77(sp) +; RV64ZVFHMIN-NEXT: sb a1, 78(sp) +; RV64ZVFHMIN-NEXT: sb a0, 79(sp) +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 72(sp) +; RV64ZVFHMIN-NEXT: sb a7, 73(sp) +; RV64ZVFHMIN-NEXT: sb t0, 74(sp) +; RV64ZVFHMIN-NEXT: sb t1, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa4, a6 +; RV64ZVFHMIN-NEXT: flt.h a0, fa4, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a3, e8, m4, ta, ma +; 
RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a2) +; RV64ZVFHMIN-NEXT: addi sp, s0, -512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 512 +; RV64ZVFHMIN-NEXT: ld ra, 504(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 496(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 512 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = load <64 x half>, ptr %y %c = fcmp nnan ugt <64 x half> %a, %b @@ -1069,6 +3001,744 @@ define void @fcmp_ugt_vf_v64f16(ptr %x, half %y, ptr %z) { ; ZVFH-NEXT: vmnot.m v8, v16 ; ZVFH-NEXT: vsm.v v8, (a1) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_vf_v64f16: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV32ZVFHMIN-NEXT: sw ra, 380(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 376(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a2, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV32ZVFHMIN-NEXT: addi a0, sp, 128 +; RV32ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV32ZVFHMIN-NEXT: lh a0, 192(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 190(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 188(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 
94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 186(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 184(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 182(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 180(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 178(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a0, 176(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 88(sp) +; RV32ZVFHMIN-NEXT: lh a0, 174(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 87(sp) +; RV32ZVFHMIN-NEXT: lh a0, 172(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 86(sp) +; RV32ZVFHMIN-NEXT: lh a0, 170(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 85(sp) +; RV32ZVFHMIN-NEXT: lh a0, 168(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 84(sp) +; RV32ZVFHMIN-NEXT: lh a0, 166(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; 
RV32ZVFHMIN-NEXT: sb a0, 83(sp) +; RV32ZVFHMIN-NEXT: lh a0, 164(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 82(sp) +; RV32ZVFHMIN-NEXT: lh a0, 162(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 160(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a3, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 226(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a0, 224(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 112(sp) +; RV32ZVFHMIN-NEXT: lh a0, 222(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 111(sp) +; RV32ZVFHMIN-NEXT: lh a0, 220(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 110(sp) +; RV32ZVFHMIN-NEXT: lh a0, 218(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 109(sp) +; RV32ZVFHMIN-NEXT: lh a0, 216(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV32ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; 
RV32ZVFHMIN-NEXT: sb a0, 108(sp) +; RV32ZVFHMIN-NEXT: lh a0, 214(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 107(sp) +; RV32ZVFHMIN-NEXT: lh a0, 212(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 106(sp) +; RV32ZVFHMIN-NEXT: lh a0, 210(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 105(sp) +; RV32ZVFHMIN-NEXT: lh a0, 208(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 104(sp) +; RV32ZVFHMIN-NEXT: lh a0, 206(sp) +; RV32ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fle.h a4, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 103(sp) +; RV32ZVFHMIN-NEXT: lh a0, 204(sp) +; RV32ZVFHMIN-NEXT: fle.h a5, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: fle.h a6, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 102(sp) +; RV32ZVFHMIN-NEXT: 
lh a0, 202(sp) +; RV32ZVFHMIN-NEXT: fle.h a7, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: fle.h t0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 101(sp) +; RV32ZVFHMIN-NEXT: lh a0, 200(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 100(sp) +; RV32ZVFHMIN-NEXT: lh a0, 198(sp) +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 99(sp) +; RV32ZVFHMIN-NEXT: lh a0, 196(sp) +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: xori t0, t0, 1 +; RV32ZVFHMIN-NEXT: xori t1, t1, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 98(sp) +; RV32ZVFHMIN-NEXT: lh a0, 194(sp) +; RV32ZVFHMIN-NEXT: sb t1, 65(sp) +; RV32ZVFHMIN-NEXT: sb t0, 66(sp) +; RV32ZVFHMIN-NEXT: sb a7, 67(sp) +; RV32ZVFHMIN-NEXT: sb a6, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a5, 69(sp) +; RV32ZVFHMIN-NEXT: sb a4, 70(sp) +; RV32ZVFHMIN-NEXT: sb a3, 71(sp) +; RV32ZVFHMIN-NEXT: sb a0, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 254(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 252(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 250(sp) 
+; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 248(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 246(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 244(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a0, 242(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 121(sp) +; RV32ZVFHMIN-NEXT: lh a0, 240(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 120(sp) +; RV32ZVFHMIN-NEXT: lh a0, 238(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 119(sp) +; RV32ZVFHMIN-NEXT: lh a0, 236(sp) +; 
RV32ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 118(sp) +; RV32ZVFHMIN-NEXT: lh a0, 234(sp) +; RV32ZVFHMIN-NEXT: fle.h a4, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: fle.h a5, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 117(sp) +; RV32ZVFHMIN-NEXT: lh a0, 232(sp) +; RV32ZVFHMIN-NEXT: fle.h a6, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fle.h a7, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 116(sp) +; RV32ZVFHMIN-NEXT: lh a0, 230(sp) +; RV32ZVFHMIN-NEXT: fle.h t0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 115(sp) +; RV32ZVFHMIN-NEXT: lh a0, 228(sp) +; RV32ZVFHMIN-NEXT: sb a6, 76(sp) +; RV32ZVFHMIN-NEXT: sb a5, 77(sp) +; RV32ZVFHMIN-NEXT: sb a4, 78(sp) +; RV32ZVFHMIN-NEXT: sb a3, 79(sp) +; RV32ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a4, a7, 1 +; RV32ZVFHMIN-NEXT: xori a5, t0, 1 +; RV32ZVFHMIN-NEXT: xori a6, t1, 1 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: sb a3, 72(sp) +; RV32ZVFHMIN-NEXT: sb a6, 73(sp) +; RV32ZVFHMIN-NEXT: sb a5, 74(sp) +; RV32ZVFHMIN-NEXT: sb a4, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; 
RV32ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV32ZVFHMIN-NEXT: addi sp, s0, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV32ZVFHMIN-NEXT: lw ra, 380(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 376(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_vf_v64f16: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV64ZVFHMIN-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a2, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV64ZVFHMIN-NEXT: addi a0, sp, 128 +; RV64ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV64ZVFHMIN-NEXT: lh a0, 192(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 190(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 188(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 186(sp) +; 
RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 184(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 182(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 180(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 178(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a0, 176(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 88(sp) +; RV64ZVFHMIN-NEXT: lh a0, 174(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 87(sp) +; RV64ZVFHMIN-NEXT: lh a0, 172(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 86(sp) +; RV64ZVFHMIN-NEXT: lh a0, 170(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 85(sp) +; RV64ZVFHMIN-NEXT: lh a0, 168(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 84(sp) +; RV64ZVFHMIN-NEXT: lh a0, 166(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 83(sp) +; 
RV64ZVFHMIN-NEXT: lh a0, 164(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 82(sp) +; RV64ZVFHMIN-NEXT: lh a0, 162(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 160(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a3, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 226(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a0, 224(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 112(sp) +; RV64ZVFHMIN-NEXT: lh a0, 222(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 111(sp) +; RV64ZVFHMIN-NEXT: lh a0, 220(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 110(sp) +; RV64ZVFHMIN-NEXT: lh a0, 218(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 109(sp) +; RV64ZVFHMIN-NEXT: lh a0, 216(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV64ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 108(sp) +; 
RV64ZVFHMIN-NEXT: lh a0, 214(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 107(sp) +; RV64ZVFHMIN-NEXT: lh a0, 212(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 106(sp) +; RV64ZVFHMIN-NEXT: lh a0, 210(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 105(sp) +; RV64ZVFHMIN-NEXT: lh a0, 208(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 104(sp) +; RV64ZVFHMIN-NEXT: lh a0, 206(sp) +; RV64ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fle.h a4, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 103(sp) +; RV64ZVFHMIN-NEXT: lh a0, 204(sp) +; RV64ZVFHMIN-NEXT: fle.h a5, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: fle.h a6, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 102(sp) +; RV64ZVFHMIN-NEXT: lh a0, 202(sp) +; RV64ZVFHMIN-NEXT: 
fle.h a7, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fle.h t0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 101(sp) +; RV64ZVFHMIN-NEXT: lh a0, 200(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 100(sp) +; RV64ZVFHMIN-NEXT: lh a0, 198(sp) +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 99(sp) +; RV64ZVFHMIN-NEXT: lh a0, 196(sp) +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: xori t0, t0, 1 +; RV64ZVFHMIN-NEXT: xori t1, t1, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 98(sp) +; RV64ZVFHMIN-NEXT: lh a0, 194(sp) +; RV64ZVFHMIN-NEXT: sb t1, 65(sp) +; RV64ZVFHMIN-NEXT: sb t0, 66(sp) +; RV64ZVFHMIN-NEXT: sb a7, 67(sp) +; RV64ZVFHMIN-NEXT: sb a6, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a5, 69(sp) +; RV64ZVFHMIN-NEXT: sb a4, 70(sp) +; RV64ZVFHMIN-NEXT: sb a3, 71(sp) +; RV64ZVFHMIN-NEXT: sb a0, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 254(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 252(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 250(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 
+; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 248(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 246(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 244(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a0, 242(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 121(sp) +; RV64ZVFHMIN-NEXT: lh a0, 240(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 120(sp) +; RV64ZVFHMIN-NEXT: lh a0, 238(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 119(sp) +; RV64ZVFHMIN-NEXT: lh a0, 236(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v8 +; 
RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 118(sp) +; RV64ZVFHMIN-NEXT: lh a0, 234(sp) +; RV64ZVFHMIN-NEXT: fle.h a4, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: fle.h a5, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 117(sp) +; RV64ZVFHMIN-NEXT: lh a0, 232(sp) +; RV64ZVFHMIN-NEXT: fle.h a6, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fle.h a7, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 116(sp) +; RV64ZVFHMIN-NEXT: lh a0, 230(sp) +; RV64ZVFHMIN-NEXT: fle.h t0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 115(sp) +; RV64ZVFHMIN-NEXT: lh a0, 228(sp) +; RV64ZVFHMIN-NEXT: sb a6, 76(sp) +; RV64ZVFHMIN-NEXT: sb a5, 77(sp) +; RV64ZVFHMIN-NEXT: sb a4, 78(sp) +; RV64ZVFHMIN-NEXT: sb a3, 79(sp) +; RV64ZVFHMIN-NEXT: fle.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: xori a4, a7, 1 +; RV64ZVFHMIN-NEXT: xori a5, t0, 1 +; RV64ZVFHMIN-NEXT: xori a6, t1, 1 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: sb a3, 72(sp) +; RV64ZVFHMIN-NEXT: sb a6, 73(sp) +; RV64ZVFHMIN-NEXT: sb a5, 74(sp) +; RV64ZVFHMIN-NEXT: sb a4, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa5, fa0 
+; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV64ZVFHMIN-NEXT: addi sp, s0, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV64ZVFHMIN-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = insertelement <64 x half> poison, half %y, i32 0 %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer @@ -1086,6 +3756,616 @@ define void @fcmp_ugt_vf_v64f16_nonans(ptr %x, half %y, ptr %z) { ; ZVFH-NEXT: vmfgt.vf v16, v8, fa0 ; ZVFH-NEXT: vsm.v v16, (a1) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_vf_v64f16_nonans: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV32ZVFHMIN-NEXT: sw ra, 380(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 376(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a2, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV32ZVFHMIN-NEXT: addi a0, sp, 128 +; RV32ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV32ZVFHMIN-NEXT: lh a0, 192(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 190(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 
95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 188(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 186(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 184(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 182(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 180(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 178(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a0, 176(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 88(sp) +; RV32ZVFHMIN-NEXT: lh a0, 174(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 87(sp) +; RV32ZVFHMIN-NEXT: lh a0, 172(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 86(sp) +; RV32ZVFHMIN-NEXT: lh a0, 170(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 85(sp) +; RV32ZVFHMIN-NEXT: lh a0, 168(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 84(sp) +; RV32ZVFHMIN-NEXT: lh a0, 166(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 83(sp) +; RV32ZVFHMIN-NEXT: lh a0, 164(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 82(sp) +; RV32ZVFHMIN-NEXT: lh a0, 162(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x 
fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 160(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a3, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 226(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a0, 224(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 112(sp) +; RV32ZVFHMIN-NEXT: lh a0, 222(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 111(sp) +; RV32ZVFHMIN-NEXT: lh a0, 220(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 110(sp) +; RV32ZVFHMIN-NEXT: lh a0, 218(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 109(sp) +; RV32ZVFHMIN-NEXT: lh a0, 216(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 108(sp) +; RV32ZVFHMIN-NEXT: lh a0, 214(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 107(sp) +; RV32ZVFHMIN-NEXT: lh a0, 212(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 106(sp) +; RV32ZVFHMIN-NEXT: lh a0, 210(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 105(sp) +; RV32ZVFHMIN-NEXT: lh a0, 208(sp) +; 
RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 104(sp) +; RV32ZVFHMIN-NEXT: lh a0, 206(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 103(sp) +; RV32ZVFHMIN-NEXT: lh a0, 204(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 102(sp) +; RV32ZVFHMIN-NEXT: lh a0, 202(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: sb a0, 101(sp) +; RV32ZVFHMIN-NEXT: lh a0, 200(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: flt.h a5, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a0, 100(sp) +; RV32ZVFHMIN-NEXT: lh a0, 198(sp) +; RV32ZVFHMIN-NEXT: flt.h a6, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a0, 99(sp) +; RV32ZVFHMIN-NEXT: lh a0, 196(sp) +; RV32ZVFHMIN-NEXT: flt.h t0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 98(sp) +; RV32ZVFHMIN-NEXT: lh a0, 194(sp) +; RV32ZVFHMIN-NEXT: sb 
t1, 65(sp) +; RV32ZVFHMIN-NEXT: sb t0, 66(sp) +; RV32ZVFHMIN-NEXT: sb a7, 67(sp) +; RV32ZVFHMIN-NEXT: sb a6, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a5, 69(sp) +; RV32ZVFHMIN-NEXT: sb a4, 70(sp) +; RV32ZVFHMIN-NEXT: sb a3, 71(sp) +; RV32ZVFHMIN-NEXT: sb a0, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 254(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 252(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 250(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 248(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 246(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 244(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a0, 242(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 121(sp) +; RV32ZVFHMIN-NEXT: lh a0, 240(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v16 +; 
RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 120(sp) +; RV32ZVFHMIN-NEXT: lh a0, 238(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 119(sp) +; RV32ZVFHMIN-NEXT: lh a0, 236(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: sb a0, 118(sp) +; RV32ZVFHMIN-NEXT: lh a0, 234(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: flt.h a5, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a0, 117(sp) +; RV32ZVFHMIN-NEXT: lh a0, 232(sp) +; RV32ZVFHMIN-NEXT: flt.h a6, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a0, 116(sp) +; RV32ZVFHMIN-NEXT: lh a0, 230(sp) +; RV32ZVFHMIN-NEXT: flt.h t0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: sb a0, 115(sp) +; RV32ZVFHMIN-NEXT: lh a0, 228(sp) +; RV32ZVFHMIN-NEXT: sb a6, 76(sp) +; RV32ZVFHMIN-NEXT: sb a5, 77(sp) +; RV32ZVFHMIN-NEXT: sb a4, 78(sp) +; RV32ZVFHMIN-NEXT: sb a3, 79(sp) +; RV32ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a3, 72(sp) +; RV32ZVFHMIN-NEXT: sb t1, 73(sp) +; RV32ZVFHMIN-NEXT: sb t0, 74(sp) +; RV32ZVFHMIN-NEXT: sb a7, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h 
a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV32ZVFHMIN-NEXT: addi sp, s0, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV32ZVFHMIN-NEXT: lw ra, 380(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 376(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_vf_v64f16_nonans: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV64ZVFHMIN-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a2, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV64ZVFHMIN-NEXT: addi a0, sp, 128 +; RV64ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV64ZVFHMIN-NEXT: lh a0, 192(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 190(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 188(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 186(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 184(sp) +; RV64ZVFHMIN-NEXT: 
fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 182(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 180(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 178(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a0, 176(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 88(sp) +; RV64ZVFHMIN-NEXT: lh a0, 174(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 87(sp) +; RV64ZVFHMIN-NEXT: lh a0, 172(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 86(sp) +; RV64ZVFHMIN-NEXT: lh a0, 170(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 85(sp) +; RV64ZVFHMIN-NEXT: lh a0, 168(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 84(sp) +; RV64ZVFHMIN-NEXT: lh a0, 166(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 83(sp) +; RV64ZVFHMIN-NEXT: lh a0, 164(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 82(sp) +; RV64ZVFHMIN-NEXT: lh a0, 162(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 160(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a3, 64(sp) +; 
RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 226(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a0, 224(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 112(sp) +; RV64ZVFHMIN-NEXT: lh a0, 222(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 111(sp) +; RV64ZVFHMIN-NEXT: lh a0, 220(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 110(sp) +; RV64ZVFHMIN-NEXT: lh a0, 218(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 109(sp) +; RV64ZVFHMIN-NEXT: lh a0, 216(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 108(sp) +; RV64ZVFHMIN-NEXT: lh a0, 214(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 107(sp) +; RV64ZVFHMIN-NEXT: lh a0, 212(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 106(sp) +; RV64ZVFHMIN-NEXT: lh a0, 210(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 105(sp) +; RV64ZVFHMIN-NEXT: lh a0, 208(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 104(sp) +; RV64ZVFHMIN-NEXT: lh a0, 206(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: 
vmv.x.s a4, v11 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 103(sp) +; RV64ZVFHMIN-NEXT: lh a0, 204(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 102(sp) +; RV64ZVFHMIN-NEXT: lh a0, 202(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: sb a0, 101(sp) +; RV64ZVFHMIN-NEXT: lh a0, 200(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: flt.h a5, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a0, 100(sp) +; RV64ZVFHMIN-NEXT: lh a0, 198(sp) +; RV64ZVFHMIN-NEXT: flt.h a6, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a0, 99(sp) +; RV64ZVFHMIN-NEXT: lh a0, 196(sp) +; RV64ZVFHMIN-NEXT: flt.h t0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 98(sp) +; RV64ZVFHMIN-NEXT: lh a0, 194(sp) +; RV64ZVFHMIN-NEXT: sb t1, 65(sp) +; RV64ZVFHMIN-NEXT: sb t0, 66(sp) +; RV64ZVFHMIN-NEXT: sb a7, 67(sp) +; RV64ZVFHMIN-NEXT: sb a6, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a5, 69(sp) +; RV64ZVFHMIN-NEXT: sb a4, 70(sp) +; RV64ZVFHMIN-NEXT: sb a3, 71(sp) +; RV64ZVFHMIN-NEXT: sb a0, 97(sp) +; 
RV64ZVFHMIN-NEXT: lh a0, 254(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 252(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 250(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 248(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 246(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 244(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a0, 242(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 121(sp) +; RV64ZVFHMIN-NEXT: lh a0, 240(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 120(sp) +; RV64ZVFHMIN-NEXT: lh a0, 238(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; 
RV64ZVFHMIN-NEXT: sb a0, 119(sp) +; RV64ZVFHMIN-NEXT: lh a0, 236(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: sb a0, 118(sp) +; RV64ZVFHMIN-NEXT: lh a0, 234(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: flt.h a5, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a0, 117(sp) +; RV64ZVFHMIN-NEXT: lh a0, 232(sp) +; RV64ZVFHMIN-NEXT: flt.h a6, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a0, 116(sp) +; RV64ZVFHMIN-NEXT: lh a0, 230(sp) +; RV64ZVFHMIN-NEXT: flt.h t0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: sb a0, 115(sp) +; RV64ZVFHMIN-NEXT: lh a0, 228(sp) +; RV64ZVFHMIN-NEXT: sb a6, 76(sp) +; RV64ZVFHMIN-NEXT: sb a5, 77(sp) +; RV64ZVFHMIN-NEXT: sb a4, 78(sp) +; RV64ZVFHMIN-NEXT: sb a3, 79(sp) +; RV64ZVFHMIN-NEXT: flt.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a3, 72(sp) +; RV64ZVFHMIN-NEXT: sb t1, 73(sp) +; RV64ZVFHMIN-NEXT: sb t0, 74(sp) +; RV64ZVFHMIN-NEXT: sb a7, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV64ZVFHMIN-NEXT: addi sp, s0, -384 +; 
RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV64ZVFHMIN-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = insertelement <64 x half> poison, half %y, i32 0 %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer @@ -1710,6 +4990,744 @@ define void @fcmp_ugt_fv_v64f16(ptr %x, half %y, ptr %z) { ; ZVFH-NEXT: vmnot.m v8, v16 ; ZVFH-NEXT: vsm.v v8, (a1) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_fv_v64f16: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV32ZVFHMIN-NEXT: sw ra, 380(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 376(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a2, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV32ZVFHMIN-NEXT: addi a0, sp, 128 +; RV32ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV32ZVFHMIN-NEXT: lh a0, 192(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 190(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 188(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 186(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; 
RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 184(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 182(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 180(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 178(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a0, 176(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 88(sp) +; RV32ZVFHMIN-NEXT: lh a0, 174(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 87(sp) +; RV32ZVFHMIN-NEXT: lh a0, 172(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 86(sp) +; RV32ZVFHMIN-NEXT: lh a0, 170(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 85(sp) +; RV32ZVFHMIN-NEXT: lh a0, 168(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 84(sp) +; RV32ZVFHMIN-NEXT: lh a0, 166(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 83(sp) +; RV32ZVFHMIN-NEXT: lh a0, 164(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; 
RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 82(sp) +; RV32ZVFHMIN-NEXT: lh a0, 162(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 160(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a3, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 226(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a0, 224(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 112(sp) +; RV32ZVFHMIN-NEXT: lh a0, 222(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 111(sp) +; RV32ZVFHMIN-NEXT: lh a0, 220(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 110(sp) +; RV32ZVFHMIN-NEXT: lh a0, 218(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 109(sp) +; RV32ZVFHMIN-NEXT: lh a0, 216(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV32ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 108(sp) +; RV32ZVFHMIN-NEXT: lh a0, 214(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 
+; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 107(sp) +; RV32ZVFHMIN-NEXT: lh a0, 212(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 106(sp) +; RV32ZVFHMIN-NEXT: lh a0, 210(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 105(sp) +; RV32ZVFHMIN-NEXT: lh a0, 208(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 104(sp) +; RV32ZVFHMIN-NEXT: lh a0, 206(sp) +; RV32ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: fle.h a4, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 103(sp) +; RV32ZVFHMIN-NEXT: lh a0, 204(sp) +; RV32ZVFHMIN-NEXT: fle.h a5, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: fle.h a6, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 102(sp) +; RV32ZVFHMIN-NEXT: lh a0, 202(sp) +; RV32ZVFHMIN-NEXT: fle.h a7, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: 
fle.h t0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 101(sp) +; RV32ZVFHMIN-NEXT: lh a0, 200(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 100(sp) +; RV32ZVFHMIN-NEXT: lh a0, 198(sp) +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 99(sp) +; RV32ZVFHMIN-NEXT: lh a0, 196(sp) +; RV32ZVFHMIN-NEXT: xori a7, a7, 1 +; RV32ZVFHMIN-NEXT: xori t0, t0, 1 +; RV32ZVFHMIN-NEXT: xori t1, t1, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 98(sp) +; RV32ZVFHMIN-NEXT: lh a0, 194(sp) +; RV32ZVFHMIN-NEXT: sb t1, 65(sp) +; RV32ZVFHMIN-NEXT: sb t0, 66(sp) +; RV32ZVFHMIN-NEXT: sb a7, 67(sp) +; RV32ZVFHMIN-NEXT: sb a6, 68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a5, 69(sp) +; RV32ZVFHMIN-NEXT: sb a4, 70(sp) +; RV32ZVFHMIN-NEXT: sb a3, 71(sp) +; RV32ZVFHMIN-NEXT: sb a0, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 254(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 252(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 250(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 
+; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 248(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 246(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 244(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a0, 242(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 121(sp) +; RV32ZVFHMIN-NEXT: lh a0, 240(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 120(sp) +; RV32ZVFHMIN-NEXT: lh a0, 238(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 119(sp) +; RV32ZVFHMIN-NEXT: lh a0, 236(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; 
RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 118(sp) +; RV32ZVFHMIN-NEXT: lh a0, 234(sp) +; RV32ZVFHMIN-NEXT: fle.h a4, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: fle.h a5, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 117(sp) +; RV32ZVFHMIN-NEXT: lh a0, 232(sp) +; RV32ZVFHMIN-NEXT: fle.h a6, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: fle.h a7, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 116(sp) +; RV32ZVFHMIN-NEXT: lh a0, 230(sp) +; RV32ZVFHMIN-NEXT: fle.h t0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: fle.h t1, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: xori a4, a4, 1 +; RV32ZVFHMIN-NEXT: xori a5, a5, 1 +; RV32ZVFHMIN-NEXT: xori a6, a6, 1 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 115(sp) +; RV32ZVFHMIN-NEXT: lh a0, 228(sp) +; RV32ZVFHMIN-NEXT: sb a6, 76(sp) +; RV32ZVFHMIN-NEXT: sb a5, 77(sp) +; RV32ZVFHMIN-NEXT: sb a4, 78(sp) +; RV32ZVFHMIN-NEXT: sb a3, 79(sp) +; RV32ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a4, a7, 1 +; RV32ZVFHMIN-NEXT: xori a5, t0, 1 +; RV32ZVFHMIN-NEXT: xori a6, t1, 1 +; RV32ZVFHMIN-NEXT: xori a3, a3, 1 +; RV32ZVFHMIN-NEXT: sb a3, 72(sp) +; RV32ZVFHMIN-NEXT: sb a6, 73(sp) +; RV32ZVFHMIN-NEXT: sb a5, 74(sp) +; RV32ZVFHMIN-NEXT: sb a4, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV32ZVFHMIN-NEXT: xori a0, a0, 1 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; 
RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV32ZVFHMIN-NEXT: addi sp, s0, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV32ZVFHMIN-NEXT: lw ra, 380(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 376(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_fv_v64f16: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV64ZVFHMIN-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a2, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV64ZVFHMIN-NEXT: addi a0, sp, 128 +; RV64ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV64ZVFHMIN-NEXT: lh a0, 192(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 190(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 188(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 186(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; 
RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 184(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh a0, 182(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 180(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 178(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a0, 176(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 88(sp) +; RV64ZVFHMIN-NEXT: lh a0, 174(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 87(sp) +; RV64ZVFHMIN-NEXT: lh a0, 172(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 86(sp) +; RV64ZVFHMIN-NEXT: lh a0, 170(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 85(sp) +; RV64ZVFHMIN-NEXT: lh a0, 168(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 84(sp) +; RV64ZVFHMIN-NEXT: lh a0, 166(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 83(sp) +; RV64ZVFHMIN-NEXT: lh a0, 164(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; 
RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 82(sp) +; RV64ZVFHMIN-NEXT: lh a0, 162(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 160(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a3, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 226(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a0, 224(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 112(sp) +; RV64ZVFHMIN-NEXT: lh a0, 222(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 111(sp) +; RV64ZVFHMIN-NEXT: lh a0, 220(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 110(sp) +; RV64ZVFHMIN-NEXT: lh a0, 218(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 109(sp) +; RV64ZVFHMIN-NEXT: lh a0, 216(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV64ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 108(sp) +; RV64ZVFHMIN-NEXT: lh a0, 214(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, 
v8, 4 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 107(sp) +; RV64ZVFHMIN-NEXT: lh a0, 212(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 106(sp) +; RV64ZVFHMIN-NEXT: lh a0, 210(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 105(sp) +; RV64ZVFHMIN-NEXT: lh a0, 208(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 104(sp) +; RV64ZVFHMIN-NEXT: lh a0, 206(sp) +; RV64ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: fle.h a4, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 103(sp) +; RV64ZVFHMIN-NEXT: lh a0, 204(sp) +; RV64ZVFHMIN-NEXT: fle.h a5, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: fle.h a6, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 102(sp) +; RV64ZVFHMIN-NEXT: lh a0, 202(sp) +; RV64ZVFHMIN-NEXT: fle.h a7, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: fle.h t0, fa0, fa5 +; RV64ZVFHMIN-NEXT: 
fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 101(sp) +; RV64ZVFHMIN-NEXT: lh a0, 200(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 100(sp) +; RV64ZVFHMIN-NEXT: lh a0, 198(sp) +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 99(sp) +; RV64ZVFHMIN-NEXT: lh a0, 196(sp) +; RV64ZVFHMIN-NEXT: xori a7, a7, 1 +; RV64ZVFHMIN-NEXT: xori t0, t0, 1 +; RV64ZVFHMIN-NEXT: xori t1, t1, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 98(sp) +; RV64ZVFHMIN-NEXT: lh a0, 194(sp) +; RV64ZVFHMIN-NEXT: sb t1, 65(sp) +; RV64ZVFHMIN-NEXT: sb t0, 66(sp) +; RV64ZVFHMIN-NEXT: sb a7, 67(sp) +; RV64ZVFHMIN-NEXT: sb a6, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a5, 69(sp) +; RV64ZVFHMIN-NEXT: sb a4, 70(sp) +; RV64ZVFHMIN-NEXT: sb a3, 71(sp) +; RV64ZVFHMIN-NEXT: sb a0, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 254(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 252(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 250(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; 
RV64ZVFHMIN-NEXT: lh a0, 248(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 246(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 244(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a0, 242(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 121(sp) +; RV64ZVFHMIN-NEXT: lh a0, 240(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 120(sp) +; RV64ZVFHMIN-NEXT: lh a0, 238(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 119(sp) +; RV64ZVFHMIN-NEXT: lh a0, 236(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; 
RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 118(sp) +; RV64ZVFHMIN-NEXT: lh a0, 234(sp) +; RV64ZVFHMIN-NEXT: fle.h a4, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: fle.h a5, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 117(sp) +; RV64ZVFHMIN-NEXT: lh a0, 232(sp) +; RV64ZVFHMIN-NEXT: fle.h a6, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: fle.h a7, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 116(sp) +; RV64ZVFHMIN-NEXT: lh a0, 230(sp) +; RV64ZVFHMIN-NEXT: fle.h t0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: fle.h t1, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: xori a4, a4, 1 +; RV64ZVFHMIN-NEXT: xori a5, a5, 1 +; RV64ZVFHMIN-NEXT: xori a6, a6, 1 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 115(sp) +; RV64ZVFHMIN-NEXT: lh a0, 228(sp) +; RV64ZVFHMIN-NEXT: sb a6, 76(sp) +; RV64ZVFHMIN-NEXT: sb a5, 77(sp) +; RV64ZVFHMIN-NEXT: sb a4, 78(sp) +; RV64ZVFHMIN-NEXT: sb a3, 79(sp) +; RV64ZVFHMIN-NEXT: fle.h a3, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a4, a7, 1 +; RV64ZVFHMIN-NEXT: xori a5, t0, 1 +; RV64ZVFHMIN-NEXT: xori a6, t1, 1 +; RV64ZVFHMIN-NEXT: xori a3, a3, 1 +; RV64ZVFHMIN-NEXT: sb a3, 72(sp) +; RV64ZVFHMIN-NEXT: sb a6, 73(sp) +; RV64ZVFHMIN-NEXT: sb a5, 74(sp) +; RV64ZVFHMIN-NEXT: sb a4, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: fle.h a0, fa0, fa5 +; RV64ZVFHMIN-NEXT: xori a0, a0, 1 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; 
RV64ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV64ZVFHMIN-NEXT: addi sp, s0, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV64ZVFHMIN-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 368(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = insertelement <64 x half> poison, half %y, i32 0 %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer @@ -1727,6 +5745,616 @@ define void @fcmp_ugt_fv_v64f16_nonans(ptr %x, half %y, ptr %z) { ; ZVFH-NEXT: vmflt.vf v16, v8, fa0 ; ZVFH-NEXT: vsm.v v16, (a1) ; ZVFH-NEXT: ret +; +; RV32ZVFHMIN-LABEL: fcmp_ugt_fv_v64f16_nonans: +; RV32ZVFHMIN: # %bb.0: +; RV32ZVFHMIN-NEXT: addi sp, sp, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV32ZVFHMIN-NEXT: sw ra, 380(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: sw s0, 376(sp) # 4-byte Folded Spill +; RV32ZVFHMIN-NEXT: .cfi_offset ra, -4 +; RV32ZVFHMIN-NEXT: .cfi_offset s0, -8 +; RV32ZVFHMIN-NEXT: addi s0, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVFHMIN-NEXT: andi sp, sp, -128 +; RV32ZVFHMIN-NEXT: li a2, 64 +; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV32ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV32ZVFHMIN-NEXT: addi a0, sp, 128 +; RV32ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV32ZVFHMIN-NEXT: lh a0, 192(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 96(sp) +; RV32ZVFHMIN-NEXT: lh a0, 190(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 95(sp) +; RV32ZVFHMIN-NEXT: lh a0, 188(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, 
fa0 +; RV32ZVFHMIN-NEXT: sb a0, 94(sp) +; RV32ZVFHMIN-NEXT: lh a0, 186(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 93(sp) +; RV32ZVFHMIN-NEXT: lh a0, 184(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 92(sp) +; RV32ZVFHMIN-NEXT: lh a0, 182(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 91(sp) +; RV32ZVFHMIN-NEXT: lh a0, 180(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 90(sp) +; RV32ZVFHMIN-NEXT: lh a0, 178(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 89(sp) +; RV32ZVFHMIN-NEXT: lh a0, 176(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 88(sp) +; RV32ZVFHMIN-NEXT: lh a0, 174(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 87(sp) +; RV32ZVFHMIN-NEXT: lh a0, 172(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 86(sp) +; RV32ZVFHMIN-NEXT: lh a0, 170(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 85(sp) +; RV32ZVFHMIN-NEXT: lh a0, 168(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 84(sp) +; RV32ZVFHMIN-NEXT: lh a0, 166(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 83(sp) +; RV32ZVFHMIN-NEXT: lh a0, 164(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 82(sp) +; RV32ZVFHMIN-NEXT: lh a0, 162(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 81(sp) +; RV32ZVFHMIN-NEXT: lh a0, 160(sp) 
+; RV32ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a3, 64(sp) +; RV32ZVFHMIN-NEXT: sb a0, 80(sp) +; RV32ZVFHMIN-NEXT: lh a0, 226(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 113(sp) +; RV32ZVFHMIN-NEXT: lh a0, 224(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 112(sp) +; RV32ZVFHMIN-NEXT: lh a0, 222(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 111(sp) +; RV32ZVFHMIN-NEXT: lh a0, 220(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 110(sp) +; RV32ZVFHMIN-NEXT: lh a0, 218(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 109(sp) +; RV32ZVFHMIN-NEXT: lh a0, 216(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 108(sp) +; RV32ZVFHMIN-NEXT: lh a0, 214(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 107(sp) +; RV32ZVFHMIN-NEXT: lh a0, 212(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 106(sp) +; RV32ZVFHMIN-NEXT: lh a0, 210(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV32ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 105(sp) +; RV32ZVFHMIN-NEXT: lh a0, 208(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV32ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV32ZVFHMIN-NEXT: 
vslidedown.vi v16, v8, 1 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 104(sp) +; RV32ZVFHMIN-NEXT: lh a0, 206(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 103(sp) +; RV32ZVFHMIN-NEXT: lh a0, 204(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 102(sp) +; RV32ZVFHMIN-NEXT: lh a0, 202(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: sb a0, 101(sp) +; RV32ZVFHMIN-NEXT: lh a0, 200(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: flt.h a5, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a0, 100(sp) +; RV32ZVFHMIN-NEXT: lh a0, 198(sp) +; RV32ZVFHMIN-NEXT: flt.h a6, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a0, 99(sp) +; RV32ZVFHMIN-NEXT: lh a0, 196(sp) +; RV32ZVFHMIN-NEXT: flt.h t0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 98(sp) +; RV32ZVFHMIN-NEXT: lh a0, 194(sp) +; RV32ZVFHMIN-NEXT: sb t1, 65(sp) +; RV32ZVFHMIN-NEXT: sb t0, 66(sp) +; RV32ZVFHMIN-NEXT: sb a7, 67(sp) +; RV32ZVFHMIN-NEXT: sb a6, 
68(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a5, 69(sp) +; RV32ZVFHMIN-NEXT: sb a4, 70(sp) +; RV32ZVFHMIN-NEXT: sb a3, 71(sp) +; RV32ZVFHMIN-NEXT: sb a0, 97(sp) +; RV32ZVFHMIN-NEXT: lh a0, 254(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 127(sp) +; RV32ZVFHMIN-NEXT: lh a0, 252(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 126(sp) +; RV32ZVFHMIN-NEXT: lh a0, 250(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 125(sp) +; RV32ZVFHMIN-NEXT: lh a0, 248(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 124(sp) +; RV32ZVFHMIN-NEXT: lh a0, 246(sp) +; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV32ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV32ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV32ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 123(sp) +; RV32ZVFHMIN-NEXT: lh a0, 244(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV32ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV32ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 122(sp) +; RV32ZVFHMIN-NEXT: lh a0, 242(sp) +; RV32ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV32ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV32ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 121(sp) +; RV32ZVFHMIN-NEXT: lh a0, 240(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV32ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV32ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 120(sp) +; 
RV32ZVFHMIN-NEXT: lh a0, 238(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV32ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV32ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 119(sp) +; RV32ZVFHMIN-NEXT: lh a0, 236(sp) +; RV32ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV32ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV32ZVFHMIN-NEXT: sb a0, 118(sp) +; RV32ZVFHMIN-NEXT: lh a0, 234(sp) +; RV32ZVFHMIN-NEXT: flt.h a4, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV32ZVFHMIN-NEXT: flt.h a5, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV32ZVFHMIN-NEXT: sb a0, 117(sp) +; RV32ZVFHMIN-NEXT: lh a0, 232(sp) +; RV32ZVFHMIN-NEXT: flt.h a6, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV32ZVFHMIN-NEXT: flt.h a7, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV32ZVFHMIN-NEXT: sb a0, 116(sp) +; RV32ZVFHMIN-NEXT: lh a0, 230(sp) +; RV32ZVFHMIN-NEXT: flt.h t0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV32ZVFHMIN-NEXT: flt.h t1, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV32ZVFHMIN-NEXT: sb a0, 115(sp) +; RV32ZVFHMIN-NEXT: lh a0, 228(sp) +; RV32ZVFHMIN-NEXT: sb a6, 76(sp) +; RV32ZVFHMIN-NEXT: sb a5, 77(sp) +; RV32ZVFHMIN-NEXT: sb a4, 78(sp) +; RV32ZVFHMIN-NEXT: sb a3, 79(sp) +; RV32ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a3, 72(sp) +; RV32ZVFHMIN-NEXT: sb t1, 73(sp) +; RV32ZVFHMIN-NEXT: sb t0, 74(sp) +; RV32ZVFHMIN-NEXT: sb a7, 75(sp) +; RV32ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV32ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV32ZVFHMIN-NEXT: sb a0, 114(sp) +; RV32ZVFHMIN-NEXT: addi a0, sp, 64 +; RV32ZVFHMIN-NEXT: vsetvli 
zero, a2, e8, m4, ta, ma +; RV32ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV32ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV32ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV32ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV32ZVFHMIN-NEXT: addi sp, s0, -384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV32ZVFHMIN-NEXT: lw ra, 380(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: lw s0, 376(sp) # 4-byte Folded Reload +; RV32ZVFHMIN-NEXT: .cfi_restore ra +; RV32ZVFHMIN-NEXT: .cfi_restore s0 +; RV32ZVFHMIN-NEXT: addi sp, sp, 384 +; RV32ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVFHMIN-NEXT: ret +; +; RV64ZVFHMIN-LABEL: fcmp_ugt_fv_v64f16_nonans: +; RV64ZVFHMIN: # %bb.0: +; RV64ZVFHMIN-NEXT: addi sp, sp, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 384 +; RV64ZVFHMIN-NEXT: sd ra, 376(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: sd s0, 368(sp) # 8-byte Folded Spill +; RV64ZVFHMIN-NEXT: .cfi_offset ra, -8 +; RV64ZVFHMIN-NEXT: .cfi_offset s0, -16 +; RV64ZVFHMIN-NEXT: addi s0, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVFHMIN-NEXT: andi sp, sp, -128 +; RV64ZVFHMIN-NEXT: li a2, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; RV64ZVFHMIN-NEXT: vle16.v v8, (a0) +; RV64ZVFHMIN-NEXT: addi a0, sp, 128 +; RV64ZVFHMIN-NEXT: vse16.v v8, (a0) +; RV64ZVFHMIN-NEXT: lh a0, 192(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 96(sp) +; RV64ZVFHMIN-NEXT: lh a0, 190(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 95(sp) +; RV64ZVFHMIN-NEXT: lh a0, 188(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 94(sp) +; RV64ZVFHMIN-NEXT: lh a0, 186(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 93(sp) +; RV64ZVFHMIN-NEXT: lh a0, 184(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 92(sp) +; RV64ZVFHMIN-NEXT: lh 
a0, 182(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 91(sp) +; RV64ZVFHMIN-NEXT: lh a0, 180(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 90(sp) +; RV64ZVFHMIN-NEXT: lh a0, 178(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 89(sp) +; RV64ZVFHMIN-NEXT: lh a0, 176(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 88(sp) +; RV64ZVFHMIN-NEXT: lh a0, 174(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 87(sp) +; RV64ZVFHMIN-NEXT: lh a0, 172(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 86(sp) +; RV64ZVFHMIN-NEXT: lh a0, 170(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 85(sp) +; RV64ZVFHMIN-NEXT: lh a0, 168(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 84(sp) +; RV64ZVFHMIN-NEXT: lh a0, 166(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 83(sp) +; RV64ZVFHMIN-NEXT: lh a0, 164(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 82(sp) +; RV64ZVFHMIN-NEXT: lh a0, 162(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 81(sp) +; RV64ZVFHMIN-NEXT: lh a0, 160(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a3, 64(sp) +; RV64ZVFHMIN-NEXT: sb a0, 80(sp) +; RV64ZVFHMIN-NEXT: lh a0, 226(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h 
a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 113(sp) +; RV64ZVFHMIN-NEXT: lh a0, 224(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 112(sp) +; RV64ZVFHMIN-NEXT: lh a0, 222(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 111(sp) +; RV64ZVFHMIN-NEXT: lh a0, 220(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 110(sp) +; RV64ZVFHMIN-NEXT: lh a0, 218(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 109(sp) +; RV64ZVFHMIN-NEXT: lh a0, 216(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 108(sp) +; RV64ZVFHMIN-NEXT: lh a0, 214(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 107(sp) +; RV64ZVFHMIN-NEXT: lh a0, 212(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 7 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 106(sp) +; RV64ZVFHMIN-NEXT: lh a0, 210(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v11, v8, 6 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 5 +; RV64ZVFHMIN-NEXT: vslidedown.vi v13, v8, 4 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 105(sp) +; RV64ZVFHMIN-NEXT: lh a0, 208(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 3 +; RV64ZVFHMIN-NEXT: vslidedown.vi v15, v8, 2 +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 1 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 104(sp) +; RV64ZVFHMIN-NEXT: lh a0, 206(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v11 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v12 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; 
RV64ZVFHMIN-NEXT: sb a0, 103(sp) +; RV64ZVFHMIN-NEXT: lh a0, 204(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v13 +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v15 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 102(sp) +; RV64ZVFHMIN-NEXT: lh a0, 202(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; RV64ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: sb a0, 101(sp) +; RV64ZVFHMIN-NEXT: lh a0, 200(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: flt.h a5, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a0, 100(sp) +; RV64ZVFHMIN-NEXT: lh a0, 198(sp) +; RV64ZVFHMIN-NEXT: flt.h a6, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a0, 99(sp) +; RV64ZVFHMIN-NEXT: lh a0, 196(sp) +; RV64ZVFHMIN-NEXT: flt.h t0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 98(sp) +; RV64ZVFHMIN-NEXT: lh a0, 194(sp) +; RV64ZVFHMIN-NEXT: sb t1, 65(sp) +; RV64ZVFHMIN-NEXT: sb t0, 66(sp) +; RV64ZVFHMIN-NEXT: sb a7, 67(sp) +; RV64ZVFHMIN-NEXT: sb a6, 68(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a5, 69(sp) +; RV64ZVFHMIN-NEXT: sb a4, 70(sp) +; RV64ZVFHMIN-NEXT: sb a3, 71(sp) +; RV64ZVFHMIN-NEXT: sb a0, 97(sp) +; RV64ZVFHMIN-NEXT: lh a0, 254(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 
127(sp) +; RV64ZVFHMIN-NEXT: lh a0, 252(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 126(sp) +; RV64ZVFHMIN-NEXT: lh a0, 250(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 125(sp) +; RV64ZVFHMIN-NEXT: lh a0, 248(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 124(sp) +; RV64ZVFHMIN-NEXT: lh a0, 246(sp) +; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e16, m2, ta, ma +; RV64ZVFHMIN-NEXT: vslidedown.vi v10, v8, 15 +; RV64ZVFHMIN-NEXT: vslidedown.vi v12, v8, 14 +; RV64ZVFHMIN-NEXT: vslidedown.vi v14, v8, 13 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 123(sp) +; RV64ZVFHMIN-NEXT: lh a0, 244(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v16, v8, 12 +; RV64ZVFHMIN-NEXT: vslidedown.vi v18, v8, 11 +; RV64ZVFHMIN-NEXT: vslidedown.vi v20, v8, 10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 122(sp) +; RV64ZVFHMIN-NEXT: lh a0, 242(sp) +; RV64ZVFHMIN-NEXT: vslidedown.vi v22, v8, 9 +; RV64ZVFHMIN-NEXT: vslidedown.vi v8, v8, 8 +; RV64ZVFHMIN-NEXT: vmv.x.s a3, v10 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 121(sp) +; RV64ZVFHMIN-NEXT: lh a0, 240(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a4, v12 +; RV64ZVFHMIN-NEXT: vmv.x.s a5, v14 +; RV64ZVFHMIN-NEXT: vmv.x.s a6, v16 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 120(sp) +; RV64ZVFHMIN-NEXT: lh a0, 238(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s a7, v18 +; RV64ZVFHMIN-NEXT: vmv.x.s t0, v20 +; RV64ZVFHMIN-NEXT: vmv.x.s t1, v22 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 119(sp) +; RV64ZVFHMIN-NEXT: lh a0, 236(sp) +; RV64ZVFHMIN-NEXT: vmv.x.s t2, v8 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a3 +; 
RV64ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a4 +; RV64ZVFHMIN-NEXT: sb a0, 118(sp) +; RV64ZVFHMIN-NEXT: lh a0, 234(sp) +; RV64ZVFHMIN-NEXT: flt.h a4, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a5 +; RV64ZVFHMIN-NEXT: flt.h a5, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a6 +; RV64ZVFHMIN-NEXT: sb a0, 117(sp) +; RV64ZVFHMIN-NEXT: lh a0, 232(sp) +; RV64ZVFHMIN-NEXT: flt.h a6, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a7 +; RV64ZVFHMIN-NEXT: flt.h a7, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t0 +; RV64ZVFHMIN-NEXT: sb a0, 116(sp) +; RV64ZVFHMIN-NEXT: lh a0, 230(sp) +; RV64ZVFHMIN-NEXT: flt.h t0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t1 +; RV64ZVFHMIN-NEXT: flt.h t1, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, t2 +; RV64ZVFHMIN-NEXT: sb a0, 115(sp) +; RV64ZVFHMIN-NEXT: lh a0, 228(sp) +; RV64ZVFHMIN-NEXT: sb a6, 76(sp) +; RV64ZVFHMIN-NEXT: sb a5, 77(sp) +; RV64ZVFHMIN-NEXT: sb a4, 78(sp) +; RV64ZVFHMIN-NEXT: sb a3, 79(sp) +; RV64ZVFHMIN-NEXT: flt.h a3, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a3, 72(sp) +; RV64ZVFHMIN-NEXT: sb t1, 73(sp) +; RV64ZVFHMIN-NEXT: sb t0, 74(sp) +; RV64ZVFHMIN-NEXT: sb a7, 75(sp) +; RV64ZVFHMIN-NEXT: fmv.h.x fa5, a0 +; RV64ZVFHMIN-NEXT: flt.h a0, fa5, fa0 +; RV64ZVFHMIN-NEXT: sb a0, 114(sp) +; RV64ZVFHMIN-NEXT: addi a0, sp, 64 +; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; RV64ZVFHMIN-NEXT: vle8.v v8, (a0) +; RV64ZVFHMIN-NEXT: vand.vi v8, v8, 1 +; RV64ZVFHMIN-NEXT: vmsne.vi v12, v8, 0 +; RV64ZVFHMIN-NEXT: vsm.v v12, (a1) +; RV64ZVFHMIN-NEXT: addi sp, s0, -384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa sp, 384 +; RV64ZVFHMIN-NEXT: ld ra, 376(sp) # 8-byte Folded Reload +; RV64ZVFHMIN-NEXT: ld s0, 368(sp) # 8-byte 
Folded Reload +; RV64ZVFHMIN-NEXT: .cfi_restore ra +; RV64ZVFHMIN-NEXT: .cfi_restore s0 +; RV64ZVFHMIN-NEXT: addi sp, sp, 384 +; RV64ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVFHMIN-NEXT: ret %a = load <64 x half>, ptr %x %b = insertelement <64 x half> poison, half %y, i32 0 %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll index c76aa7c4d317d..5c17283cacd1b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll @@ -3,8 +3,8 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64 ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvkb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-V ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvkb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-V -; RUN: llc -mtriple=riscv32 -mattr=+zve32x,+zvfh,+zvkb,+zvl64b -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-ZVE32X -; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvfh,+zvkb,+zvl64b -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-ZVE32X +; RUN: llc -mtriple=riscv32 -mattr=+zve32x,+zvfh,+zvkb,+zvl64b -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-ZVE32X,RV32ZVKB-ZVE32X +; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvfh,+zvkb,+zvl64b -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-ZVE32X,RV64ZVKB-ZVE32X define <8 x i1> @shuffle_v8i1_as_i8_1(<8 x i1> %v) { ; CHECK-LABEL: shuffle_v8i1_as_i8_1: @@ -926,6 +926,136 @@ define <8 x i64> @shuffle_v8i64_as_i128(<8 x i64> %v) { ; ZVKB-V-NEXT: vslideup.vi v12, v8, 1, v0.t ; ZVKB-V-NEXT: vmv.v.v v8, v12 ; ZVKB-V-NEXT: ret +; +; RV32ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i128: +; RV32ZVKB-ZVE32X: # %bb.0: +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; 
RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV32ZVKB-ZVE32X-NEXT: sw ra, 124(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s0, 120(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s2, 116(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s3, 112(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset ra, -4 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s0, -8 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s2, -12 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s3, -16 +; RV32ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV32ZVKB-ZVE32X-NEXT: lw a2, 0(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a3, 4(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a4, 8(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a5, 12(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a6, 16(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a7, 20(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t0, 24(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t1, 28(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t2, 48(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t3, 52(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t4, 56(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t5, 60(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t6, 32(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s2, 36(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s3, 40(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a1, 44(a1) +; RV32ZVKB-ZVE32X-NEXT: sw t4, 48(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t5, 52(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t2, 56(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t3, 60(sp) +; RV32ZVKB-ZVE32X-NEXT: sw s3, 32(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a1, 36(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t6, 40(sp) +; RV32ZVKB-ZVE32X-NEXT: sw s2, 44(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t0, 16(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t1, 20(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a6, 24(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a7, 28(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a4, 0(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a5, 4(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a2, 8(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a3, 12(sp) +; RV32ZVKB-ZVE32X-NEXT: mv a1, sp +; RV32ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV32ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; 
RV32ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV32ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV32ZVKB-ZVE32X-NEXT: lw ra, 124(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s0, 120(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s2, 116(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s3, 112(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s2 +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s3 +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVKB-ZVE32X-NEXT: ret +; +; RV64ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i128: +; RV64ZVKB-ZVE32X: # %bb.0: +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV64ZVKB-ZVE32X-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s0, 112(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s2, 104(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s3, 96(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset ra, -8 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s0, -16 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s2, -24 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s3, -32 +; RV64ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV64ZVKB-ZVE32X-NEXT: ld a2, 0(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a3, 8(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a4, 16(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a5, 24(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a6, 32(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a7, 40(a1) +; RV64ZVKB-ZVE32X-NEXT: ld t0, 48(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a1, 56(a1) +; RV64ZVKB-ZVE32X-NEXT: srli t1, a3, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t2, a2, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t3, a5, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t4, a4, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t5, a7, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t6, a6, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s2, a1, 32 +; 
RV64ZVKB-ZVE32X-NEXT: srli s3, t0, 32 +; RV64ZVKB-ZVE32X-NEXT: sw a1, 48(sp) +; RV64ZVKB-ZVE32X-NEXT: sw s2, 52(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t0, 56(sp) +; RV64ZVKB-ZVE32X-NEXT: sw s3, 60(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a7, 32(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t5, 36(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a6, 40(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t6, 44(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a5, 16(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t3, 20(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a4, 24(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t4, 28(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a3, 0(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t1, 4(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a2, 8(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t2, 12(sp) +; RV64ZVKB-ZVE32X-NEXT: mv a1, sp +; RV64ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV64ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV64ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV64ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV64ZVKB-ZVE32X-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s2, 104(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s3, 96(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s2 +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s3 +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVKB-ZVE32X-NEXT: ret %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> ret <8 x i64> %shuffle } @@ -951,6 +1081,104 @@ define <8 x i64> @shuffle_v8i64_as_i128_2(<8 x i64> %v) { ; ZVKB-V-NEXT: vslideup.vi v12, v8, 1, v0.t ; ZVKB-V-NEXT: vmv.v.v v8, v12 ; ZVKB-V-NEXT: ret +; +; RV32ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i128_2: +; RV32ZVKB-ZVE32X: # %bb.0: +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV32ZVKB-ZVE32X-NEXT: sw ra, 124(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s0, 
120(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset ra, -4 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s0, -8 +; RV32ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV32ZVKB-ZVE32X-NEXT: lw a2, 16(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a3, 20(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a4, 24(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a5, 28(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a6, 48(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a7, 52(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t0, 56(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t1, 60(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t2, 32(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t3, 36(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t4, 40(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a1, 44(a1) +; RV32ZVKB-ZVE32X-NEXT: sw t0, 48(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t1, 52(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a6, 56(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a7, 60(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t4, 32(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a1, 36(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t2, 40(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t3, 44(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a4, 16(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a5, 20(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a2, 24(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a3, 28(sp) +; RV32ZVKB-ZVE32X-NEXT: mv a1, sp +; RV32ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV32ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV32ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV32ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV32ZVKB-ZVE32X-NEXT: lw ra, 124(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s0, 120(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVKB-ZVE32X-NEXT: ret +; +; RV64ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i128_2: +; RV64ZVKB-ZVE32X: # %bb.0: +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV64ZVKB-ZVE32X-NEXT: sd ra, 120(sp) # 8-byte 
Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s0, 112(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset ra, -8 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s0, -16 +; RV64ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV64ZVKB-ZVE32X-NEXT: ld a2, 16(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a3, 24(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a4, 32(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a5, 40(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a6, 48(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a1, 56(a1) +; RV64ZVKB-ZVE32X-NEXT: srli a7, a3, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t0, a2, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t1, a5, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t2, a4, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t3, a1, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t4, a6, 32 +; RV64ZVKB-ZVE32X-NEXT: sw a1, 48(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t3, 52(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a6, 56(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t4, 60(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a5, 32(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t1, 36(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a4, 40(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t2, 44(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a3, 16(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a7, 20(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a2, 24(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t0, 28(sp) +; RV64ZVKB-ZVE32X-NEXT: mv a1, sp +; RV64ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV64ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV64ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV64ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV64ZVKB-ZVE32X-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVKB-ZVE32X-NEXT: ret %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> ret <8 x i64> %shuffle } @@ -975,6 +1203,136 @@ define <8 x i64> @shuffle_v8i64_as_i256(<8 x i64> 
%v) { ; ZVKB-V-NEXT: vrgatherei16.vv v12, v8, v16 ; ZVKB-V-NEXT: vmv.v.v v8, v12 ; ZVKB-V-NEXT: ret +; +; RV32ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i256: +; RV32ZVKB-ZVE32X: # %bb.0: +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV32ZVKB-ZVE32X-NEXT: sw ra, 124(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s0, 120(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s2, 116(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s3, 112(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset ra, -4 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s0, -8 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s2, -12 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s3, -16 +; RV32ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV32ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV32ZVKB-ZVE32X-NEXT: lw a2, 0(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a3, 4(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a4, 8(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a5, 12(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a6, 16(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a7, 20(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t0, 24(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t1, 28(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t2, 32(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t3, 36(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t4, 40(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t5, 44(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t6, 48(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s2, 52(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s3, 56(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a1, 60(a1) +; RV32ZVKB-ZVE32X-NEXT: sw t2, 48(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t3, 52(sp) +; RV32ZVKB-ZVE32X-NEXT: sw s3, 56(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a1, 60(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t6, 32(sp) +; RV32ZVKB-ZVE32X-NEXT: sw s2, 36(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t4, 40(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t5, 44(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a2, 16(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a3, 20(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t0, 24(sp) +; RV32ZVKB-ZVE32X-NEXT: sw t1, 28(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a6, 0(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a7, 4(sp) +; 
RV32ZVKB-ZVE32X-NEXT: sw a4, 8(sp) +; RV32ZVKB-ZVE32X-NEXT: sw a5, 12(sp) +; RV32ZVKB-ZVE32X-NEXT: mv a1, sp +; RV32ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV32ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV32ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV32ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV32ZVKB-ZVE32X-NEXT: lw ra, 124(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s0, 120(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s2, 116(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s3, 112(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s2 +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s3 +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVKB-ZVE32X-NEXT: ret +; +; RV64ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i256: +; RV64ZVKB-ZVE32X: # %bb.0: +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 128 +; RV64ZVKB-ZVE32X-NEXT: sd ra, 120(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s0, 112(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s2, 104(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s3, 96(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset ra, -8 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s0, -16 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s2, -24 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s3, -32 +; RV64ZVKB-ZVE32X-NEXT: addi s0, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa s0, 0 +; RV64ZVKB-ZVE32X-NEXT: andi sp, sp, -64 +; RV64ZVKB-ZVE32X-NEXT: ld a2, 0(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a3, 8(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a4, 16(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a5, 24(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a6, 32(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a7, 40(a1) +; RV64ZVKB-ZVE32X-NEXT: ld t0, 48(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a1, 56(a1) +; RV64ZVKB-ZVE32X-NEXT: srli t1, a4, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t2, a3, 32 +; 
RV64ZVKB-ZVE32X-NEXT: srli t3, a2, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t4, a5, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t5, t0, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t6, a7, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s2, a6, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s3, a1, 32 +; RV64ZVKB-ZVE32X-NEXT: sw a6, 48(sp) +; RV64ZVKB-ZVE32X-NEXT: sw s2, 52(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a1, 56(sp) +; RV64ZVKB-ZVE32X-NEXT: sw s3, 60(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t0, 32(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t5, 36(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a7, 40(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t6, 44(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a2, 16(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t3, 20(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a5, 24(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t4, 28(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a4, 0(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t1, 4(sp) +; RV64ZVKB-ZVE32X-NEXT: sw a3, 8(sp) +; RV64ZVKB-ZVE32X-NEXT: sw t2, 12(sp) +; RV64ZVKB-ZVE32X-NEXT: mv a1, sp +; RV64ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m8, ta, ma +; RV64ZVKB-ZVE32X-NEXT: vle32.v v8, (a1) +; RV64ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV64ZVKB-ZVE32X-NEXT: addi sp, s0, -128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa sp, 128 +; RV64ZVKB-ZVE32X-NEXT: ld ra, 120(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s0, 112(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s2, 104(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s3, 96(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore ra +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s2 +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s3 +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, 128 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVKB-ZVE32X-NEXT: ret %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> ret <8 x i64> %shuffle } @@ -1003,6 +1361,106 @@ define <8 x i64> @shuffle_v8i64_as_i256_zvl256b(<8 x i64> %v) vscale_range(4,0) ; ZVKB-V-NEXT: vrgatherei16.vv v10, v8, v12 ; ZVKB-V-NEXT: vmv2r.v v8, v10 ; ZVKB-V-NEXT: ret +; +; RV32ZVKB-ZVE32X-LABEL: 
shuffle_v8i64_as_i256_zvl256b: +; RV32ZVKB-ZVE32X: # %bb.0: +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, -16 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 16 +; RV32ZVKB-ZVE32X-NEXT: sw s0, 12(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: sw s1, 8(sp) # 4-byte Folded Spill +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s0, -4 +; RV32ZVKB-ZVE32X-NEXT: .cfi_offset s1, -8 +; RV32ZVKB-ZVE32X-NEXT: lw a2, 48(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a3, 52(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a4, 56(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a5, 60(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a6, 32(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a7, 36(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t0, 40(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t1, 44(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t2, 16(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t3, 20(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t4, 24(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t5, 28(a1) +; RV32ZVKB-ZVE32X-NEXT: lw t6, 0(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s0, 4(a1) +; RV32ZVKB-ZVE32X-NEXT: lw s1, 8(a1) +; RV32ZVKB-ZVE32X-NEXT: lw a1, 12(a1) +; RV32ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m2, ta, ma +; RV32ZVKB-ZVE32X-NEXT: vmv.v.x v8, t2 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t3 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, s1 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a1 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t6 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, s0 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t4 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t5 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a2 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a3 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t0 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t1 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a6 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a7 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a4 +; RV32ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a5 +; RV32ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV32ZVKB-ZVE32X-NEXT: lw s0, 12(sp) # 4-byte Folded Reload +; RV32ZVKB-ZVE32X-NEXT: lw s1, 8(sp) # 4-byte 
Folded Reload +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV32ZVKB-ZVE32X-NEXT: .cfi_restore s1 +; RV32ZVKB-ZVE32X-NEXT: addi sp, sp, 16 +; RV32ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV32ZVKB-ZVE32X-NEXT: ret +; +; RV64ZVKB-ZVE32X-LABEL: shuffle_v8i64_as_i256_zvl256b: +; RV64ZVKB-ZVE32X: # %bb.0: +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, -16 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 16 +; RV64ZVKB-ZVE32X-NEXT: sd s0, 8(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: sd s1, 0(sp) # 8-byte Folded Spill +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s0, -8 +; RV64ZVKB-ZVE32X-NEXT: .cfi_offset s1, -16 +; RV64ZVKB-ZVE32X-NEXT: ld a2, 32(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a3, 40(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a4, 48(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a5, 56(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a6, 0(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a7, 8(a1) +; RV64ZVKB-ZVE32X-NEXT: ld t0, 16(a1) +; RV64ZVKB-ZVE32X-NEXT: ld a1, 24(a1) +; RV64ZVKB-ZVE32X-NEXT: srli t1, a5, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t2, a2, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t3, a3, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t4, a4, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t5, a1, 32 +; RV64ZVKB-ZVE32X-NEXT: srli t6, a6, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s0, a7, 32 +; RV64ZVKB-ZVE32X-NEXT: srli s1, t0, 32 +; RV64ZVKB-ZVE32X-NEXT: vsetivli zero, 16, e32, m2, ta, ma +; RV64ZVKB-ZVE32X-NEXT: vmv.v.x v8, t0 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, s1 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a7 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, s0 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a6 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t6 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a1 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t5 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a4 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t4 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a3 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t3 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a2 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, 
t2 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, a5 +; RV64ZVKB-ZVE32X-NEXT: vslide1down.vx v8, v8, t1 +; RV64ZVKB-ZVE32X-NEXT: vse32.v v8, (a0) +; RV64ZVKB-ZVE32X-NEXT: ld s0, 8(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: ld s1, 0(sp) # 8-byte Folded Reload +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s0 +; RV64ZVKB-ZVE32X-NEXT: .cfi_restore s1 +; RV64ZVKB-ZVE32X-NEXT: addi sp, sp, 16 +; RV64ZVKB-ZVE32X-NEXT: .cfi_def_cfa_offset 0 +; RV64ZVKB-ZVE32X-NEXT: ret %shuffle = shufflevector <8 x i64> %v, <8 x i64> poison, <8 x i32> ret <8 x i64> %shuffle } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll index 5aa3a246d7616..0561ee9addc7b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfw-web-simplification.ll @@ -5,7 +5,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+f,+d -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING,ZVFHMIN ; Check that the default value enables the web folding and ; that it is bigger than 3. 
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=FOLDING +; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+f,+d -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=FOLDING,ZVFH define void @vfwmul_v2f116_multiple_users(ptr %x, ptr %y, ptr %z, <2 x half> %a, <2 x half> %b, <2 x half> %b2) { ; NO_FOLDING1-LABEL: vfwmul_v2f116_multiple_users: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll index 90e9ffdcb320a..eeb232ec1555c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s -; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV32 %s +; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV64 %s define <2 x i16> @vwmulu_v2i16(ptr %x, ptr %y) { ; CHECK-LABEL: vwmulu_v2i16: @@ -750,28 +750,25 @@ define <2 x i64> @vwmulu_vx_v2i64_i8(ptr %x, ptr %y) { ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: lb a1, 0(a1) -; RV32-NEXT: vle32.v v25, (a0) -; RV32-NEXT: srai a0, a1, 31 +; RV32-NEXT: lbu a1, 0(a1) +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: sw a1, 8(sp) -; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v26, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV32-NEXT: vzext.vf2 v27, v25 -; RV32-NEXT: vmul.vv v8, v26, v27 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vzext.vf2 v10, v8 +; RV32-NEXT: 
vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmulu_vx_v2i64_i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vle32.v v25, (a0) -; RV64-NEXT: lb a0, 0(a1) -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV64-NEXT: vzext.vf2 v26, v25 -; RV64-NEXT: vmul.vx v8, v26, a0 +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV64-NEXT: vle32.v v9, (a0) +; RV64-NEXT: lbu a0, 0(a1) +; RV64-NEXT: vwmulu.vx v8, v9, a0 ; RV64-NEXT: ret %a = load <2 x i32>, ptr %x %b = load i8, ptr %y @@ -788,28 +785,25 @@ define <2 x i64> @vwmulu_vx_v2i64_i16(ptr %x, ptr %y) { ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: lh a1, 0(a1) -; RV32-NEXT: vle32.v v25, (a0) -; RV32-NEXT: srai a0, a1, 31 +; RV32-NEXT: lhu a1, 0(a1) +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: sw a1, 8(sp) -; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v26, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV32-NEXT: vzext.vf2 v27, v25 -; RV32-NEXT: vmul.vv v8, v26, v27 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vzext.vf2 v10, v8 +; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmulu_vx_v2i64_i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vle32.v v25, (a0) -; RV64-NEXT: lh a0, 0(a1) -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV64-NEXT: vzext.vf2 v26, v25 -; RV64-NEXT: vmul.vx v8, v26, a0 +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV64-NEXT: vle32.v v9, (a0) +; RV64-NEXT: lhu a0, 0(a1) +; RV64-NEXT: vwmulu.vx v8, v9, a0 ; RV64-NEXT: ret %a = load <2 x i32>, ptr %x %b = load i16, ptr %y @@ -826,28 +820,25 @@ define <2 x i64> 
@vwmulu_vx_v2i64_i32(ptr %x, ptr %y) { ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32-NEXT: lw a1, 0(a1) -; RV32-NEXT: vle32.v v25, (a0) -; RV32-NEXT: srai a0, a1, 31 +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: sw a1, 8(sp) -; RV32-NEXT: sw a0, 12(sp) +; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v26, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV32-NEXT: vzext.vf2 v27, v25 -; RV32-NEXT: vmul.vv v8, v26, v27 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vzext.vf2 v10, v8 +; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmulu_vx_v2i64_i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vle32.v v25, (a0) +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lw a0, 0(a1) -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV64-NEXT: vzext.vf2 v26, v25 -; RV64-NEXT: vmul.vx v8, v26, a0 +; RV64-NEXT: vwmulu.vx v8, v9, a0 ; RV64-NEXT: ret %a = load <2 x i32>, ptr %x %b = load i32, ptr %y @@ -864,28 +855,27 @@ define <2 x i64> @vwmulu_vx_v2i64_i64(ptr %x, ptr %y) { ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV32-NEXT: lw a2, 4(a1) -; RV32-NEXT: lw a1, 0(a1) -; RV32-NEXT: vle32.v v25, (a0) -; RV32-NEXT: sw a2, 12(sp) -; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lw a2, 0(a1) +; RV32-NEXT: lw a1, 4(a1) +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV32-NEXT: vle32.v v8, (a0) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vlse64.v v26, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV32-NEXT: vzext.vf2 v27, v25 -; RV32-NEXT: vmul.vv v8, v26, v27 +; RV32-NEXT: vlse64.v 
v9, (a0), zero +; RV32-NEXT: vzext.vf2 v10, v8 +; RV32-NEXT: vmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vwmulu_vx_v2i64_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu -; RV64-NEXT: vle32.v v25, (a0) +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: ld a0, 0(a1) -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu -; RV64-NEXT: vzext.vf2 v26, v25 -; RV64-NEXT: vmul.vx v8, v26, a0 +; RV64-NEXT: vzext.vf2 v9, v8 +; RV64-NEXT: vmul.vx v8, v9, a0 ; RV64-NEXT: ret %a = load <2 x i32>, ptr %x %b = load i64, ptr %y diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge.ll index ef1de87b3b8b7..3fb5aa02230b4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmerge.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \ -; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck -check-prefixes=CHECK,RV32 %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ -; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck -check-prefixes=CHECK,RV64 %s declare @llvm.riscv.vmerge.nxv1i8.nxv1i8( , @@ -972,6 +972,22 @@ declare @llvm.riscv.vmerge.nxv1i64.i64( iXLen); define @intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: 
intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmerge.nxv1i64.i64( poison, @@ -991,6 +1007,22 @@ declare @llvm.riscv.vmerge.nxv2i64.i64( iXLen); define @intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmerge.nxv2i64.i64( poison, @@ -1010,6 +1042,22 @@ declare @llvm.riscv.vmerge.nxv4i64.i64( iXLen); define @intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmerge.nxv4i64.i64( poison, @@ -1029,6 +1077,22 @@ declare @llvm.riscv.vmerge.nxv8i64.i64( iXLen); define @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 
+; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v8, (a0), zero, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: ret entry: %a = call @llvm.riscv.vmerge.nxv8i64.i64( poison, diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll new file mode 100644 index 0000000000000..093d172c5c1b1 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll @@ -0,0 +1,24 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_relaxed_printf_string_address_space %s -o - | FileCheck %s +; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +; CHECK: OpExtension "SPV_EXT_relaxed_printf_string_address_space" +; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] printf + +; CHECK-ERROR: LLVM ERROR: SPV_EXT_relaxed_printf_string_address_space is required because printf uses a format string not in constant address space. + +@.str = private unnamed_addr addrspace(1) constant [4 x i8] c"%d\0A\00", align 1 + +declare spir_func i32 @printf(ptr addrspace(4), ...) + +define spir_kernel void @test_kernel() { +entry: + ; Format string in addrspace(1) → cast to addrspace(4) + %format = addrspacecast ptr addrspace(1) @.str to ptr addrspace(4) + %val = alloca i32, align 4 + store i32 123, ptr %val, align 4 + %loaded = load i32, ptr %val, align 4 + + ; Call printf with non-constant format string + %call = call spir_func i32 (ptr addrspace(4), ...) 
@printf(ptr addrspace(4) %format, i32 %loaded) + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll new file mode 100644 index 0000000000000..b54d59b30309f --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll @@ -0,0 +1,48 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_relaxed_printf_string_address_space %s -o - | FileCheck %s +; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +; CHECK: OpExtension "SPV_EXT_relaxed_printf_string_address_space" +; CHECK: %[[#ExtInstSetId:]] = OpExtInstImport "OpenCL.std" +; CHECK-DAG: %[[#TypeInt32Id:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#TypeInt8Id:]] = OpTypeInt 8 0 +; CHECK-DAG: %[[#TypeInt64Id:]] = OpTypeInt 64 0 +; CHECK-DAG: %[[#TypeArrayId:]] = OpTypeArray %[[#TypeInt8Id]] %[[#]] +; CHECK-DAG: %[[#ConstantStorClassGlobalPtrTy:]] = OpTypePointer UniformConstant %[[#TypeArrayId]] +; CHECK-DAG: %[[#WGStorClassGlobalPtrTy:]] = OpTypePointer Workgroup %[[#TypeArrayId]] +; CHECK-DAG: %[[#CrossWFStorClassGlobalPtrTy:]] = OpTypePointer CrossWorkgroup %[[#TypeArrayId]] +; CHECK-DAG: %[[#FunctionStorClassPtrTy:]] = OpTypePointer Function %[[#TypeInt8Id]] +; CHECK-DAG: %[[#WGStorClassPtrTy:]] = OpTypePointer Workgroup %[[#TypeInt8Id]] +; CHECK-DAG: %[[#CrossWFStorClassPtrTy:]] = OpTypePointer CrossWorkgroup %[[#TypeInt8Id]] +; CHECK: %[[#ConstantCompositeId:]] = OpConstantComposite %[[#TypeArrayId]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpVariable %[[#ConstantStorClassGlobalPtrTy]] UniformConstant %[[#ConstantCompositeId]] +; CHECK: %[[#]] = OpVariable %[[#CrossWFStorClassGlobalPtrTy]] CrossWorkgroup %[[#ConstantCompositeId]] +; CHECK: %[[#]] = OpVariable 
%[[#WGStorClassGlobalPtrTy]] Workgroup %[[#ConstantCompositeId]] +; CHECK: %[[#GEP1:]] = OpInBoundsPtrAccessChain %[[#FunctionStorClassPtrTy]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP1]] +; CHECK: %[[#GEP2:]] = OpInBoundsPtrAccessChain %[[#CrossWFStorClassPtrTy]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP2]] +; CHECK: %[[#GEP3:]] = OpInBoundsPtrAccessChain %[[#WGStorClassPtrTy]] %[[#]] %[[#]] %[[#]] +; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP3]] + +; CHECK-ERROR: LLVM ERROR: SPV_EXT_relaxed_printf_string_address_space is required because printf uses a format string not in constant address space. + +@0 = internal unnamed_addr addrspace(2) constant [6 x i8] c"Test\0A\00", align 1 +@1 = internal unnamed_addr addrspace(1) constant [6 x i8] c"Test\0A\00", align 1 +@2 = internal unnamed_addr addrspace(3) constant [6 x i8] c"Test\0A\00", align 1 + +define spir_kernel void @test() { + %tmp1 = alloca [6 x i8], align 1 + call void @llvm.memcpy.p0.p2.i64(ptr align 1 %tmp1, ptr addrspace(2) align 1 @0, i64 6, i1 false) + %1 = getelementptr inbounds [6 x i8], ptr %tmp1, i32 0, i32 0 + %2 = call spir_func i32 @_Z18__spirv_ocl_printfPc(ptr %1) + %3 = getelementptr inbounds [6 x i8], ptr addrspace(1) @1, i32 0, i32 0 + %4 = call spir_func i32 @_Z18__spirv_ocl_printfPU3AS1c(ptr addrspace(1) %3) + %5 = getelementptr inbounds [6 x i8], ptr addrspace(3) @2, i32 0, i32 0 + %6 = call spir_func i32 @_Z18__spirv_ocl_printfPU3AS3c(ptr addrspace(3) %5) + ret void +} + +declare spir_func i32 @_Z18__spirv_ocl_printfPc(ptr) +declare spir_func i32 @_Z18__spirv_ocl_printfPU3AS1c(ptr addrspace(1)) +declare spir_func i32 @_Z18__spirv_ocl_printfPU3AS3c(ptr addrspace(3)) +declare void @llvm.memcpy.p0.p2.i64(ptr captures(none), ptr addrspace(2) captures(none) readonly, i64, i1) diff --git 
a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll new file mode 100644 index 0000000000000..3624f149cb491 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll @@ -0,0 +1,19 @@ +; RUN: not llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_bindless_images %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR + +; CHECK-ERROR: LLVM ERROR: Parameter value must be a 32-bit scalar in case of Physical32 addressing model or a 64-bit scalar in case of Physical64 addressing model + +target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64" +target triple = "spir64-unknown-unknown" + +define spir_func void @foo(i32 %in) { + %img = call spir_func target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) @_Z33__spirv_ConvertHandleToImageINTELi(i32 %in) + %samp = call spir_func target("spirv.Sampler") @_Z35__spirv_ConvertHandleToSamplerINTELl(i64 42) + %sampImage = call spir_func target("spirv.SampledImage", i64, 1, 0, 0, 0, 0, 0, 0) @_Z40__spirv_ConvertHandleToSampledImageINTELl(i64 43) + ret void +} + +declare spir_func target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) @_Z33__spirv_ConvertHandleToImageINTELi(i32) + +declare spir_func target("spirv.Sampler") @_Z35__spirv_ConvertHandleToSamplerINTELl(i64) + +declare spir_func target("spirv.SampledImage", i64, 1, 0, 0, 0, 0, 0, 0) @_Z40__spirv_ConvertHandleToSampledImageINTELl(i64) diff --git a/llvm/test/CodeGen/SPIRV/image_store.ll b/llvm/test/CodeGen/SPIRV/image_store.ll new file mode 100644 index 0000000000000..a70651c974f36 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/image_store.ll @@ -0,0 +1,22 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + 
+; Image types may be represented in two ways while translating to SPIR-V: +; - OpenCL form, for example, '%opencl.image2d_ro_t', +; - SPIR-V form, for example, '%spirv.Image._void_1_0_0_0_0_0_0', +; but it is still one type which should be translated to one SPIR-V type. +; +; The test checks that the code below is successfully translated and only one +; SPIR-V type for images is generated (no duplicate OpTypeImage instructions). + +; CHECK: %[[#]] = OpTypeImage %[[#]] 2D +; CHECK-NOT: %[[#]] = OpTypeImage %[[#]] 2D + +declare spir_func <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_ff(ptr addrspace(1), ptr addrspace(2), <2 x float>, float) + +define spir_kernel void @read_image(ptr addrspace(1) %srcimg, ptr addrspace(2) %sampler){ +entry: + %spirvimg.addr = alloca target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), align 8 + %val = call <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_ff(ptr addrspace(1) %srcimg, ptr addrspace(2) %sampler, <2 x float> zeroinitializer, float 0.0) + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll new file mode 100644 index 0000000000000..b788f34bf7238 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll @@ -0,0 +1,28 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-LABEL: Begin function original_testcase +define fastcc void @original_testcase() { +top: + ; CHECK: OpCompositeInsert + %0 = insertvalue [1 x ptr] zeroinitializer, ptr poison, 0 + ret void +} + +; CHECK-LABEL: Begin function additional_testcases +define fastcc void @additional_testcases() { +top: + ; Test with different pointer types + ; CHECK: OpCompositeInsert + %1 = insertvalue [1 x ptr] zeroinitializer, ptr undef, 0 + ; CHECK-NEXT: OpCompositeInsert + %2 
= insertvalue {ptr, i32} zeroinitializer, ptr poison, 0 + ; CHECK-NEXT: OpCompositeInsert + %3 = insertvalue {ptr, ptr} undef, ptr null, 0 + + ; Test with undef aggregate + ; CHECK-NEXT: OpCompositeInsert + %4 = insertvalue [1 x ptr] undef, ptr undef, 0 + + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll new file mode 100644 index 0000000000000..49bb8eac10be8 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll @@ -0,0 +1,56 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: OpFOrdEqual +; CHECK-DAG: OpFOrdGreaterThan +; CHECK-DAG: OpFOrdGreaterThanEqual +; CHECK-DAG: OpFOrdLessThan +; CHECK-DAG: OpFOrdLessThanEqual +; CHECK-DAG: OpFOrdNotEqual +; CHECK-DAG: OpOrdered +; CHECK-DAG: OpFUnordEqual +; CHECK-DAG: OpFUnordGreaterThan +; CHECK-DAG: OpFUnordGreaterThanEqual +; CHECK-DAG: OpFUnordLessThan +; CHECK-DAG: OpFUnordLessThanEqual +; CHECK-DAG: OpFUnordNotEqual +; CHECK-DAG: OpUnordered + +define dso_local spir_kernel void @test(float %a){ +entry: + %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"oeq", metadata !"fpexcept.strict") + %cmp1 = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"ogt", metadata !"fpexcept.strict") + %cmp2 = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"oge", metadata !"fpexcept.strict") + %cmp3 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"olt", metadata !"fpexcept.strict") + %cmp4 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ole", metadata !"fpexcept.strict") + %cmp5 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, 
float %a, metadata !"one", metadata !"fpexcept.strict") + %cmp6 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ord", metadata !"fpexcept.strict") + %cmp7 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ueq", metadata !"fpexcept.strict") + %cmp8 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ugt", metadata !"fpexcept.strict") + %cmp9 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"uge", metadata !"fpexcept.strict") + %cmp10 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ult", metadata !"fpexcept.strict") + %cmp11 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ule", metadata !"fpexcept.strict") + %cmp12 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"une", metadata !"fpexcept.strict") + %cmp13 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"uno", metadata !"fpexcept.strict") + + %or1 = or i1 %cmp, %cmp1 + %or2 = or i1 %or1, %cmp2 + %or3 = or i1 %or2, %cmp3 + %or4 = or i1 %or3, %cmp4 + %or5 = or i1 %or4, %cmp5 + %or6 = or i1 %or5, %cmp6 + %or7 = or i1 %or6, %cmp7 + %or8 = or i1 %or7, %cmp8 + %or9 = or i1 %or8, %cmp9 + %or10 = or i1 %or9, %cmp10 + %or11 = or i1 %or10, %cmp11 + %or12 = or i1 %or11, %cmp12 + %or13 = or i1 %or12, %cmp13 + br i1 %or13, label %true_block, label %false_block +true_block: + ret void +false_block: + ret void +} +declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata) +declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll new file mode 100644 index 0000000000000..fd8cb9d7ff6f0 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll @@ -0,0 +1,14 
@@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s + +; CHECK: OpNop +; CHECK-NEXT: OpReturn + +declare void @llvm.debugtrap() + +define spir_kernel void @foo(ptr addrspace(1) %a){ +entry: + %a.addr = alloca ptr addrspace(1), align 4 + store ptr addrspace(1) %a, ptr %a.addr, align 4 + call void @llvm.debugtrap() + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll new file mode 100644 index 0000000000000..f6434e94a9d79 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll @@ -0,0 +1,114 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-DAG: %[[#extinst_id:]] = OpExtInstImport "OpenCL.std" +; CHECK-DAG: %[[#float_32_type:]] = OpTypeFloat 32 +; CHECK-DAG: %[[#int_32_type:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#fn_ptr_type_i32:]] = OpTypePointer Function %[[#int_32_type]] +; CHECK-DAG: %[[#const_negzero:]] = OpConstant %[[#float_32_type]] -0 +; CHECK-DAG: %[[#vec2_float_type:]] = OpTypeVector %[[#float_32_type]] 2 +; CHECK-DAG: %[[#vec2_int_type:]] = OpTypeVector %[[#int_32_type]] 2 +; CHECK-DAG: %[[#fn_ptr_type_vec2_i32:]] = OpTypePointer Function %[[#vec2_int_type]] +; CHECK-DAG: %[[#vec2_null:]] = OpConstantNull %[[#vec2_float_type]] +; CHECK-DAG: %[[#scalar_null:]] = OpConstantNull %[[#float_32_type]] +; CHECK-DAG: %[[#const_composite1:]] = OpConstantComposite %[[#vec2_float_type]] %[[#scalar_null]] %[[#const_negzero]] +; CHECK-DAG: %[[#vec4_float_type:]] = OpTypeVector %[[#float_32_type]] 4 +; CHECK-DAG: %[[#vec4_int_type:]] = OpTypeVector %[[#int_32_type]] 4 +; CHECK-DAG: %[[#fn_ptr_type_vec4_i32:]] = OpTypePointer Function %[[#vec4_int_type]] +; CHECK-DAG: %[[#const_composite2:]] = OpConstantComposite %[[#vec4_float_type]] %[[#const_16:]] %[[#const_neg32:]] 
%[[#const_0:]] %[[#const_9999:]] +; CHECK-DAG: %[[#float_64_type:]] = OpTypeFloat 64 +; CHECK-DAG: %[[#vec2_double_type:]] = OpTypeVector %[[#float_64_type]] 2 + +; CHECK: %[[#]] = OpFunctionParameter %[[#float_32_type]] +; CHECK: %[[#var1:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#extinst1:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#const_negzero]] %[[#var1]] +; CHECK: %[[#exp_part_var:]] = OpLoad %[[#int_32_type]] %[[#var1]] +; CHECK: OpReturnValue %[[#exp_part_var]] +define i32 @frexp_negzero(float %x) { + %ret = call { float, i32 } @llvm.frexp.f32.i32(float -0.0) + %f_part = extractvalue { float, i32 } %ret, 0 + %exp_part = extractvalue { float, i32 } %ret, 1 + ret i32 %exp_part +} + +; CHECK: %[[#x_var4:]] = OpFunctionParameter %[[#float_32_type]] +; CHECK: %[[#var10:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#extinst10:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#x_var4]] %[[#var10]] +; CHECK: %[[#exp_part_var2:]] = OpLoad %[[#int_32_type]] %[[#var10]] +; CHECK: OpReturnValue %[[#exp_part_var2]] +define i32 @frexp_frexp_get_int(float %x) { + %frexp0 = call { float, i32 } @llvm.frexp.f32.i32(float %x) + %f_part = extractvalue { float, i32 } %frexp0, 0 + %exp_part = extractvalue { float, i32 } %frexp0, 1 + ret i32 %exp_part +} + +; CHECK: %[[#var3:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function +; CHECK: %[[#extinst3:]] = OpExtInst %[[#vec2_float_type]] %[[#extinst_id]] frexp %[[#vec2_null]] %[[#var3]] +; CHECK: %[[#f_part_var2:]] = OpLoad %[[#vec2_int_type]] %[[#var3]] +; CHECK: OpReturnValue %[[#extinst3]] +define <2 x float> @frexp_zero_vector() { + %ret = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> zeroinitializer) + %f_part = extractvalue { <2 x float>, <2 x i32> } %ret, 0 + %exp_part = extractvalue { <2 x float>, <2 x i32> } %ret, 1 + ret <2 x float> %f_part +} + +; CHECK: %[[#var4:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function +; CHECK: %[[#extinst4:]] 
= OpExtInst %[[#vec2_float_type]] %[[#extinst_id]] frexp %[[#const_composite1]] %[[#var4]] +; CHECK: %[[#f_part_var3:]] = OpLoad %[[#vec2_int_type]] %[[#var4]] +; CHECK: OpReturnValue %[[#extinst4]] +define <2 x float> @frexp_zero_negzero_vector() { + %ret = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> ) + %f_part = extractvalue { <2 x float>, <2 x i32> } %ret, 0 + %exp_part = extractvalue { <2 x float>, <2 x i32> } %ret, 1 + ret <2 x float> %f_part +} + +; CHECK: %[[#var5:]] = OpVariable %[[#fn_ptr_type_vec4_i32]] Function +; CHECK: %[[#extinst5:]] = OpExtInst %[[#vec4_float_type]] %[[#extinst_id]] frexp %[[#const_composite2]] %[[#var5]] +; CHECK: %[[#f_part_var4:]] = OpLoad %[[#vec4_int_type]] %[[#var5]] +; CHECK: OpReturnValue %[[#extinst5]] +define <4 x float> @frexp_nonsplat_vector() { + %ret = call { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float> ) + %f_part = extractvalue { <4 x float>, <4 x i32> } %ret, 0 + %exp_part = extractvalue { <4 x float>, <4 x i32> } %ret, 1 + ret <4 x float> %f_part +} + +; CHECK: %[[#x_var2:]] = OpFunctionParameter %[[#float_32_type]] +; CHECK: %[[#var6:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#var7:]] = OpVariable %[[#fn_ptr_type_i32]] Function +; CHECK: %[[#extinst6:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#x_var2]] %[[#var6]] +; CHECK: %[[#load1:]] = OpLoad %[[#int_32_type]] %[[#var6]] +; CHECK: %[[#extinst7:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#extinst6]] %[[#var7]] +; CHECK: %[[#f_part_var5:]] = OpLoad %[[#int_32_type]] %[[#var7]] +; CHECK: OpReturnValue %[[#extinst7]] +define float @frexp_frexp(float %x) { + %frexp0 = call { float, i32 } @llvm.frexp.f32.i32(float %x) + %frexp0_f_part = extractvalue { float, i32 } %frexp0, 0 + %frexp0_exp_part = extractvalue { float, i32 } %frexp0, 1 + %frexp1 = call { float, i32 } @llvm.frexp.f32.i32(float %frexp0_f_part) + %frexp1_f_part = extractvalue { float, i32 } %frexp1, 0 + 
%frexp1_exp_part = extractvalue { float, i32 } %frexp1, 1 + ret float %frexp1_f_part +} + +; CHECK: %[[#x_var3:]] = OpFunctionParameter %[[#vec2_double_type]] +; CHECK: %[[#var9:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function +; CHECK: %[[#extinst9:]] = OpExtInst %[[#vec2_double_type]] %[[#extinst_id]] frexp %[[#x_var3]] %[[#var9]] +; CHECK: %[[#f_part_var6:]] = OpLoad %[[#vec2_int_type]] %[[#var9]] +; CHECK: OpReturnValue %[[#extinst9]] +define <2 x double> @frexp_frexp_vector(<2 x double> %x) { + %frexp0 = call { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double> %x) + %f_part = extractvalue { <2 x double>, <2 x i32> } %frexp0, 0 + %exp_part = extractvalue { <2 x double>, <2 x i32> } %frexp0, 1 + ret <2 x double> %f_part +} + +declare { float, i32 } @llvm.frexp.f32.i32(float) +declare { double, i32 } @llvm.frexp.f64.i32(double) +declare { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float>) +declare { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float>) +declare { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double>) +declare { float, i8 } @llvm.frexp.f32.i8(float) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll index a15a80754cd60..b3ef6d6bbced9 100644 --- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll @@ -11,7 +11,6 @@ define spir_kernel void @foo(ptr %p) { entry: call void @llvm.trap() - call void @llvm.debugtrap() call void @llvm.ubsantrap(i8 100) %r1 = call ptr @llvm.invariant.start.p0(i64 1024, ptr %p) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll new file mode 100644 index 0000000000000..51b76640cc056 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll @@ -0,0 +1,86 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown 
%s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV-NOT: llvm.memmove + +; CHECK-DAG: %[[#Int8:]] = OpTypeInt 8 0 +; CHECK-DAG: %[[#Int32:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#Int64:]] = OpTypeInt 64 0 +; CHECK-DAG: %[[#Ptr_CrossWG_8:]] = OpTypePointer CrossWorkgroup %[[#Int8]] +; CHECK-DAG: %[[#Ptr_Generic_32:]] = OpTypePointer Generic %[[#Int32]] +; CHECK-DAG: %[[#Const_64:]] = OpConstant %[[#Int32]] 64 +; CHECK-DAG: %[[#Const_36:]] = OpConstant %[[#Int32]] 36 +; CHECK-DAG: %[[#Const_30:]] = OpConstant %[[#Int32]] 30 +; CHECK-DAG: %[[#Const_32_64:]] = OpConstant %[[#Int64]] 32 + +; CHECK: %[[#Param1:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Param2:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Size1:]] = OpUConvert %[[#Int64]] %[[#Const_64]] +; CHECK: OpCopyMemorySized %[[#Param2]] %[[#Param1]] %[[#Size1]] Aligned 64 + +; CHECK: %[[#Src:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#CastDst2:]] = OpGenericCastToPtr %[[#Ptr_CrossWG_8]] %[[#GenPtr:]] +; CHECK: %[[#Size2:]] = OpUConvert %[[#Int64]] %[[#Const_36]] +; CHECK: OpCopyMemorySized %[[#CastDst2]] %[[#Src]] %[[#Size2]] Aligned 64 + +; CHECK: %[[#Param1:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Param2:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]] +; CHECK: %[[#Size3:]] = OpUConvert %[[#Int64]] %[[#Const_30]] +; CHECK: OpCopyMemorySized %[[#Param2]] %[[#Param1]] %[[#Size3]] Aligned 1 + +; CHECK: %[[#Phi:]] = OpPhi %[[#Ptr_Generic_32]] %[[#Op1:]] %[[#Lbl1:]] %[[#Op2:]] %[[#Lbl2:]] +; CHECK: %[[#Cast:]] = OpPtrCastToGeneric %[[#]] %[[#]] +; CHECK: OpCopyMemorySized %[[#Cast]] %[[#Phi]] %[[#Const_32_64]] Aligned 8 + +%struct.SomeStruct = type { <16 x float>, i32, [60 x i8] } +%class.kfunc = type <{ i32, i32, i32, [4 x i8] }> + +@InvocIndex = external local_unnamed_addr addrspace(1) constant i64, align 8 +@"func_object1" = internal addrspace(3) global %class.kfunc 
zeroinitializer, align 8 + +define spir_kernel void @test_full_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(1)* captures(none) %out) { + %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)* + %2 = bitcast %struct.SomeStruct addrspace(1)* %out to i8 addrspace(1)* + call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %2, i8 addrspace(1)* align 64 %1, i32 64, i1 false) + ret void +} + +define spir_kernel void @test_partial_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(4)* captures(none) %out) { + %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)* + %2 = bitcast %struct.SomeStruct addrspace(4)* %out to i8 addrspace(4)* + %3 = addrspacecast i8 addrspace(4)* %2 to i8 addrspace(1)* + call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %3, i8 addrspace(1)* align 64 %1, i32 36, i1 false) + ret void +} + +define spir_kernel void @test_array(i8 addrspace(1)* %in, i8 addrspace(1)* %out) { + call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i32 30, i1 false) + ret void +} + +define weak_odr dso_local spir_kernel void @test_phi() local_unnamed_addr { +entry: + %0 = alloca i32, align 8 + %1 = addrspacecast i32* %0 to i32 addrspace(4)* + %2 = load i64, i64 addrspace(1)* @InvocIndex, align 8 + %cmp = icmp eq i64 %2, 0 + br i1 %cmp, label %leader, label %entry.merge_crit_edge + +entry.merge_crit_edge: ; preds = %entry + %3 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)* + br label %merge + +leader: ; preds = %entry + %4 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)* + br label %merge + +merge: ; preds = %entry.merge_crit_edge, %leader + %phi = phi i8 addrspace(4)* [ %3, %entry.merge_crit_edge ], [ %4, %leader ] + %5 = addrspacecast i8 addrspace(3)* bitcast (%class.kfunc addrspace(3)* @"func_object1" to i8 addrspace(3)*) to i8 addrspace(4)* + call void @llvm.memmove.p4i8.p4i8.i64(i8 
addrspace(4)* align 8 dereferenceable(32) %5, i8 addrspace(4)* align 8 dereferenceable(32) %phi, i64 32, i1 false) + ret void +} + +declare void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* captures(none) writeonly, i8 addrspace(4)* captures(none) readonly, i64, i1 immarg) + +declare void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* captures(none), i8 addrspace(1)* captures(none) readonly, i32, i1) diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll new file mode 100644 index 0000000000000..52f939faf0a9f --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll @@ -0,0 +1,30 @@ +; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -filetype=obj -o - | spirv-val %} +; XFAIL: * +;@llvm.sadd.with.overflow and @llvm.ssub.with.overflow has not been implemented. + +define spir_func void @test_sadd_overflow(ptr %out_result, ptr %out_overflow, i32 %a, i32 %b) { +entry: + %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b) + %val = extractvalue { i32, i1 } %res, 0 + %ofl = extractvalue { i32, i1 } %res, 1 + store i32 %val, ptr %out_result + %zext_ofl = zext i1 %ofl to i8 + store i8 %zext_ofl, ptr %out_overflow + ret void +} + +declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) + +define spir_func void @test_ssub_overflow(ptr %out_result, ptr %out_overflow, i32 %a, i32 %b) { +entry: + %res = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) + %val = extractvalue { i32, i1 } %res, 0 + %ofl = extractvalue { i32, i1 } %res, 1 + store i32 %val, ptr %out_result + %zext_ofl = zext i1 %ofl to i8 + store i8 %zext_ofl, ptr %out_overflow + ret void +} + +declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) diff --git a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll 
b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll index e405ef0ed58a5..5e66b8b639f17 100644 --- a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll +++ b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll @@ -7,10 +7,11 @@ ;; ;; Positive tests: ;; -; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV ;; ;; Negative tests: ;; +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV-NEGATIVE ;; Check that backend is able to skip nsw/nuw attributes if extension is ;; disabled implicitly or explicitly and if max SPIR-V version is lower then 1.4 diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll new file mode 100644 index 0000000000000..c8953c701d47d --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll @@ -0,0 +1,11 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV: [[#PtrT:]] = OpTypePointer Workgroup %[[#]] +; CHECK-SPIRV: %[[#]] = OpVariable %[[#PtrT]] Workgroup + +@test_atomic_fn.L = internal addrspace(3) global [64 x i32] zeroinitializer, align 4 + +define spir_kernel void @test_atomic_fn() { + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll new file mode 100644 index 0000000000000..607997d034f09 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll @@ -0,0 +1,140 @@ +; RUN: llc -verify-machineinstrs -O0 
-mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpCapability Kernel +; CHECK: OpCapability Addresses +; CHECK: OpCapability Pipes +; CHECK: OpCapability Int8 +; CHECK: OpCapability GenericPointer + +; CHECK-DAG: %[[#PipeWriteTy:]] = OpTypePipe WriteOnly +; CHECK-DAG: %[[#PipeReadTy:]] = OpTypePipe ReadOnly +; CHECK-DAG: %[[#ReserveIdTy:]] = OpTypeReserveId +; CHECK-DAG: %[[#BoolTy:]] = OpTypeBool +; CHECK-DAG: %[[#Int32Ty:]] = OpTypeInt 32 0 +; CHECK-DAG: %[[#Uint1:]] = OpConstant %[[#Int32Ty]] 1 +; CHECK-DAG: %[[#Uint2:]] = OpConstant %[[#Int32Ty]] 2 +; CHECK-DAG: %[[#Uint3:]] = OpConstant %[[#Int32Ty]] 3 +; CHECK-DAG: %[[#Uint4:]] = OpConstant %[[#Int32Ty]] 4 +; CHECK-DAG: %[[#NullUint:]] = OpConstantNull %[[#Int32Ty]] + +; CHECK: OpFunction +; CHECK: %[[#FuncParam1:]] = OpFunctionParameter %[[#PipeWriteTy]] +; CHECK: %[[#FuncParam2:]] = OpFunctionParameter %[[#PipeReadTy]] + +; CHECK: %[[#BasicWriteReserve:]] = OpReserveWritePipePackets %[[#ReserveIdTy]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpWritePipe %[[#Int32Ty]] %[[#FuncParam1]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpCommitWritePipe %[[#FuncParam1]] %[[#BasicWriteReserve]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#BasicReadReserve:]] = OpReserveReadPipePackets %[[#ReserveIdTy]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpReadPipe %[[#Int32Ty]] %[[#FuncParam2]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpCommitReadPipe %[[#FuncParam2]] %[[#BasicReadReserve]] %[[#Uint4]] %[[#Uint4]] + +; --- Reserved pipe operations --- +; CHECK: %[[#ReservedWriteReserve:]] = OpReserveWritePipePackets %[[#ReserveIdTy]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#ReservedWrite:]] = OpReservedWritePipe %[[#Int32Ty]] %[[#FuncParam1]] %[[#ReservedWriteReserve]] %[[#NullUint]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#IsValidWrite:]] = 
OpIsValidReserveId %[[#BoolTy]] %[[#ReservedWriteReserve]] +; CHECK: OpCommitWritePipe %[[#FuncParam1]] %[[#ReservedWriteReserve]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#ReservedReadReserve:]] = OpReserveReadPipePackets %[[#ReserveIdTy]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#ReservedRead:]] = OpReservedReadPipe %[[#Int32Ty]] %[[#FuncParam2]] %[[#ReservedReadReserve]] %[[#NullUint]] %[[#]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#IsValidRead:]] = OpIsValidReserveId %[[#BoolTy]] %[[#ReservedReadReserve]] +; CHECK: OpCommitReadPipe %[[#FuncParam2]] %[[#ReservedReadReserve]] %[[#Uint4]] %[[#Uint4]] + +; --- Pipe packet queries --- +; CHECK: %[[#MaxPacketsWO:]] = OpGetMaxPipePackets %[[#Int32Ty]] %[[#FuncParam1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#MaxPacketsWO]] Aligned 4 +; CHECK: %[[#NumPacketsWO:]] = OpGetNumPipePackets %[[#Int32Ty]] %[[#FuncParam1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#NumPacketsWO]] Aligned 4 +; CHECK: %[[#MaxPacketsRO:]] = OpGetMaxPipePackets %[[#Int32Ty]] %[[#FuncParam2]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#MaxPacketsRO]] Aligned 4 +; CHECK: %[[#NumPacketsRO:]] = OpGetNumPipePackets %[[#Int32Ty]] %[[#FuncParam2]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpStore %[[#]] %[[#NumPacketsRO]] Aligned 4 + +; --- Workgroup operations --- +; CHECK: %[[#WorkgroupWriteReserve:]] = OpGroupReserveWritePipePackets %[[#ReserveIdTy]] %[[#Uint2]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint1]] %[[#Uint1]] +; CHECK: OpGroupCommitWritePipe %[[#Uint2]] %[[#FuncParam1]] %[[#WorkgroupWriteReserve]] %[[#Uint1]] %[[#Uint1]] +; CHECK: %[[#WorkgroupReadReserve:]] = OpGroupReserveReadPipePackets %[[#ReserveIdTy]] %[[#Uint2]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint1]] %[[#Uint1]] +; CHECK: OpGroupCommitReadPipe %[[#Uint2]] %[[#FuncParam2]] %[[#WorkgroupReadReserve]] %[[#Uint1]] %[[#Uint1]] + +; --- Subgroup operations --- +; CHECK: %[[#SubgroupWriteReserve:]] = OpGroupReserveWritePipePackets %[[#ReserveIdTy]] 
%[[#Uint3]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpGroupCommitWritePipe %[[#Uint3]] %[[#FuncParam1]] %[[#SubgroupWriteReserve]] %[[#Uint4]] %[[#Uint4]] +; CHECK: %[[#SubgroupReadReserve:]] = OpGroupReserveReadPipePackets %[[#ReserveIdTy]] %[[#Uint3]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]] +; CHECK: OpGroupCommitReadPipe %[[#Uint3]] %[[#FuncParam2]] %[[#SubgroupReadReserve]] %[[#Uint4]] %[[#Uint4]] + +define spir_kernel void @test_pipe_builtins( + target("spirv.Pipe", 1) %out_pipe, + target("spirv.Pipe", 0) %in_pipe, + ptr addrspace(4) %src, + ptr addrspace(4) %dst, + ptr addrspace(1) %max_packets_wo, + ptr addrspace(1) %num_packets_wo, + ptr addrspace(1) %max_packets_ro, + ptr addrspace(1) %num_packets_ro +) { +entry: + ; Basic pipe operations + %0 = call spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4) + %1 = call spir_func i32 @__write_pipe_2(target("spirv.Pipe", 1) %out_pipe, ptr addrspace(4) %src, i32 4, i32 4) + call spir_func void @__commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %0, i32 4, i32 4) + + %2 = call spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4) + %3 = call spir_func i32 @__read_pipe_2(target("spirv.Pipe", 0) %in_pipe, ptr addrspace(4) %dst, i32 4, i32 4) + call spir_func void @__commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %2, i32 4, i32 4) + + ; Reserved pipe operations + %4 = call spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4) + %5 = call spir_func i32 @__write_pipe_4(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %4, i32 0, ptr addrspace(4) %src, i32 4, i32 4) + %6 = call spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId") %4) + call spir_func void @__commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, 
target("spirv.ReserveId") %4, i32 4, i32 4) + + %7 = call spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4) + %8 = call spir_func i32 @__read_pipe_4(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %7, i32 0, ptr addrspace(4) %dst, i32 4, i32 4) + %9 = call spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId") %7) + call spir_func void @__commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %7, i32 4, i32 4) + + ; Pipe packet queries + %10 = call spir_func i32 @__get_pipe_max_packets_wo(target("spirv.Pipe", 1) %out_pipe, i32 4, i32 4) + store i32 %10, ptr addrspace(1) %max_packets_wo, align 4 + %11 = call spir_func i32 @__get_pipe_num_packets_wo(target("spirv.Pipe", 1) %out_pipe, i32 4, i32 4) + store i32 %11, ptr addrspace(1) %num_packets_wo, align 4 + %12 = call spir_func i32 @__get_pipe_max_packets_ro(target("spirv.Pipe", 0) %in_pipe, i32 4, i32 4) + store i32 %12, ptr addrspace(1) %max_packets_ro, align 4 + %13 = call spir_func i32 @__get_pipe_num_packets_ro(target("spirv.Pipe", 0) %in_pipe, i32 4, i32 4) + store i32 %13, ptr addrspace(1) %num_packets_ro, align 4 + + ; Workgroup operations + %14 = call spir_func target("spirv.ReserveId") @__work_group_reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 1, i32 1) + call spir_func void @__work_group_commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %14, i32 1, i32 1) + %15 = call spir_func target("spirv.ReserveId") @__work_group_reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 1, i32 1) + call spir_func void @__work_group_commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %15, i32 1, i32 1) + + ; Subgroup operations + %16 = call spir_func target("spirv.ReserveId") @__sub_group_reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4) + call spir_func void 
@__sub_group_commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %16, i32 4, i32 4) + %17 = call spir_func target("spirv.ReserveId") @__sub_group_reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4) + call spir_func void @__sub_group_commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %17, i32 4, i32 4) + + ret void +} + +declare spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func i32 @__write_pipe_2(target("spirv.Pipe", 1), ptr addrspace(4), i32, i32) +declare spir_func i32 @__read_pipe_2(target("spirv.Pipe", 0), ptr addrspace(4), i32, i32) +declare spir_func i32 @__write_pipe_4(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, ptr addrspace(4), i32, i32) +declare spir_func i32 @__read_pipe_4(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, ptr addrspace(4), i32, i32) +declare spir_func void @__commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32) +declare spir_func void @__commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) +declare spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId")) +declare spir_func i32 @__get_pipe_max_packets_wo(target("spirv.Pipe", 1), i32, i32) +declare spir_func i32 @__get_pipe_num_packets_wo(target("spirv.Pipe", 1), i32, i32) +declare spir_func i32 @__get_pipe_max_packets_ro(target("spirv.Pipe", 0), i32, i32) +declare spir_func i32 @__get_pipe_num_packets_ro(target("spirv.Pipe", 0), i32, i32) +declare spir_func target("spirv.ReserveId") @__work_group_reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func void @__work_group_commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") 
@__work_group_reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func void @__work_group_commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__sub_group_reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32) +declare spir_func void @__sub_group_commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32) +declare spir_func target("spirv.ReserveId") @__sub_group_reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32) +declare spir_func void @__sub_group_commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32) diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll new file mode 100644 index 0000000000000..4c64a127a7019 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll @@ -0,0 +1,16 @@ +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK: OpDecorate %[[#Id:]] BuiltIn GlobalInvocationId +; CHECK: %[[#Id]] = OpVariable %[[#]] CrossWorkgroup + +@__spirv_BuiltInGlobalInvocationId = external dso_local local_unnamed_addr addrspace(1) constant <3 x i64>, align 32 + +define spir_kernel void @f() { +entry: + %0 = load i64, ptr addrspace(1) @__spirv_BuiltInGlobalInvocationId, align 32 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll b/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll new file mode 100644 index 0000000000000..74ce26bee9cf3 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll @@ -0,0 +1,30 @@ +; 
RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; Check saturation conversion is translated when there is forward declaration +; of SPIRV entry. + +; CHECK: OpDecorate %[[#SAT:]] SaturatedConversion +; CHECK: %[[#SAT]] = OpConvertFToU %[[#]] %[[#]] + +declare spir_func zeroext i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float) + +define spir_func void @forward(float %val, i8 %initval, ptr addrspace(1) %dst) { +entry: + br label %for.cond + +for.cond: ; preds = %for.body, %entry + %new_val.0 = phi i8 [ %initval, %entry ], [ %call1, %for.body ] + %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %cmp = icmp ult i32 %i.0, 1 + br i1 %cmp, label %for.body, label %for.end + +for.body: ; preds = %for.cond + %call1 = call spir_func zeroext i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float noundef %val) + %inc = add i32 %i.0, 1 + br label %for.cond + +for.end: ; preds = %for.cond + store i8 %new_val.0, ptr addrspace(1) %dst, align 1 + ret void +} diff --git a/llvm/test/CodeGen/SPIRV/transcoding/float16.ll b/llvm/test/CodeGen/SPIRV/transcoding/float16.ll new file mode 100644 index 0000000000000..0018dba68d4ea --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/transcoding/float16.ll @@ -0,0 +1,25 @@ +; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %} + +; CHECK-SPIRV: %[[#HALF:]] = OpTypeFloat 16 +; CHECK-SPIRV: %[[#HALFPTR:]] = OpTypePointer Function %[[#HALF]] +; CHECK-SPIRV: %[[#HALFV2:]] = OpTypeVector %[[#HALF]] 2 +; CHECK-SPIRV: %[[#HALFV2PTR:]] = OpTypePointer Function %[[#HALFV2]] +; CHECK-SPIRV: %[[#CONST:]] = OpConstant %[[#HALF]] 14788 +; CHECK-SPIRV: %[[#ADDR:]] = OpVariable %[[#HALFPTR]] Function +; CHECK-SPIRV: %[[#ADDR2:]] = OpVariable %[[#HALFV2PTR]] Function +; CHECK-SPIRV: %[[#]] = OpExtInst 
%[[#HALF]] %[[#]] fract %[[#CONST]] %[[#ADDR]] +; CHECK-SPIRV: %[[#]] = OpExtInst %[[#HALFV2]] %[[#]] fract %[[#]] %[[#ADDR2]] + +define spir_kernel void @test() { +entry: + %addr = alloca half + %addr2 = alloca <2 x half> + %res = call spir_func noundef half @_Z17__spirv_ocl_fractDF16_PU3AS0DF16_(half noundef 0xH39C4, ptr noundef %addr) + %res2 = call spir_func noundef <2 x half> @_Z17__spirv_ocl_fractDv2_DF16_PU3AS0S_(<2 x half> noundef , ptr noundef %addr2) + ret void +} + +declare spir_func noundef half @_Z17__spirv_ocl_fractDF16_PU3AS0DF16_(half noundef, ptr noundef) local_unnamed_addr + +declare spir_func noundef <2 x half> @_Z17__spirv_ocl_fractDv2_DF16_PU3AS0S_(<2 x half> noundef, ptr noundef) local_unnamed_addr diff --git a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll index eb1527ff3dc4a..32648b6b449a8 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll @@ -790,15 +790,250 @@ entry: ret i16 %result } -declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>) - - -declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32) -declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>) -declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>) -declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32) -declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>) -declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>) -declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32) -declare <16 x i8> @llvm.masked.load.v16i8.p0(ptr, i32 immarg, <16 x i1>, <16 x i8>) -declare void @llvm.masked.store.v16i8.p0(<16 x i8>, ptr, i32 immarg, <16 x i1>) +define arm_aapcs_vfpcc <4 x i32> @vmulhs_kb_v4i32(<4 x i32> %s0, <4 x i64> %s1) { +; CHECK-LABEL: vmulhs_kb_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f32 s4, s2 +; CHECK-NEXT: vmov r1, s9 +; CHECK-NEXT: vmov r2, s5 +; CHECK-NEXT: vmov.f32 
s6, s3 +; CHECK-NEXT: vmov.f32 s10, s1 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: smmul r0, r0, r1 +; CHECK-NEXT: vmov r1, s0 +; CHECK-NEXT: smmul r1, r1, r2 +; CHECK-NEXT: vmov r2, s7 +; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov r1, s11 +; CHECK-NEXT: smmul r0, r0, r1 +; CHECK-NEXT: vmov r1, s10 +; CHECK-NEXT: smmul r1, r1, r2 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: bx lr +entry: + %s0s = sext <4 x i32> %s0 to <4 x i64> + %s1s = ashr <4 x i64> %s1, + %m = mul <4 x i64> %s0s, %s1s + %s = ashr <4 x i64> %m, + %s2 = trunc <4 x i64> %s to <4 x i32> + ret <4 x i32> %s2 +} + +define arm_aapcs_vfpcc <4 x i32> @vmulhu_kb_v4i32(<4 x i32> %s0, <4 x i64> %s1) { +; CHECK-LABEL: vmulhu_kb_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f32 s4, s2 +; CHECK-NEXT: vmov r1, s9 +; CHECK-NEXT: vmov r2, s5 +; CHECK-NEXT: vmov.f32 s6, s3 +; CHECK-NEXT: vmov.f32 s10, s1 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: umull r0, r1, r0, r1 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: umull r0, r2, r0, r2 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov q0[2], q0[0], r2, r1 +; CHECK-NEXT: vmov r1, s11 +; CHECK-NEXT: vmov r2, s7 +; CHECK-NEXT: umull r0, r1, r0, r1 +; CHECK-NEXT: vmov r0, s10 +; CHECK-NEXT: umull r0, r2, r0, r2 +; CHECK-NEXT: vmov q0[3], q0[1], r2, r1 +; CHECK-NEXT: bx lr +entry: + %s0s = zext <4 x i32> %s0 to <4 x i64> + %s1s = lshr <4 x i64> %s1, + %m = mul <4 x i64> %s0s, %s1s + %s = lshr <4 x i64> %m, + %s2 = trunc <4 x i64> %s to <4 x i32> + ret <4 x i32> %s2 +} + +define arm_aapcs_vfpcc <4 x i32> @vmulhs_kbc_v4i32(<4 x i32> %s0, <4 x i64> %s1) { +; CHECK-LABEL: vmulhs_kbc_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f32 s4, s2 +; CHECK-NEXT: vmov r1, s9 +; CHECK-NEXT: vmov r2, s5 +; CHECK-NEXT: vmov.f32 s6, s3 +; CHECK-NEXT: vmov.f32 s10, s1 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: smmul r0, r1, r0 +; CHECK-NEXT: vmov r1, s0 +; CHECK-NEXT: smmul r1, r2, r1 +; CHECK-NEXT: vmov r2, s7 +; CHECK-NEXT: vmov 
q0[2], q0[0], r1, r0 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov r1, s11 +; CHECK-NEXT: smmul r0, r1, r0 +; CHECK-NEXT: vmov r1, s10 +; CHECK-NEXT: smmul r1, r2, r1 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: bx lr +entry: + %s0s = sext <4 x i32> %s0 to <4 x i64> + %s1s = ashr <4 x i64> %s1, + %m = mul <4 x i64> %s1s, %s0s + %s = ashr <4 x i64> %m, + %s2 = trunc <4 x i64> %s to <4 x i32> + ret <4 x i32> %s2 +} + +define arm_aapcs_vfpcc <4 x i32> @vmulhu_kbc_v4i32(<4 x i32> %s0, <4 x i64> %s1) { +; CHECK-LABEL: vmulhu_kbc_v4i32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov.f32 s4, s2 +; CHECK-NEXT: vmov r1, s9 +; CHECK-NEXT: vmov r2, s5 +; CHECK-NEXT: vmov.f32 s6, s3 +; CHECK-NEXT: vmov.f32 s10, s1 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: umull r0, r1, r1, r0 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: umull r0, r2, r2, r0 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov q0[2], q0[0], r2, r1 +; CHECK-NEXT: vmov r1, s11 +; CHECK-NEXT: vmov r2, s7 +; CHECK-NEXT: umull r0, r1, r1, r0 +; CHECK-NEXT: vmov r0, s10 +; CHECK-NEXT: umull r0, r2, r2, r0 +; CHECK-NEXT: vmov q0[3], q0[1], r2, r1 +; CHECK-NEXT: bx lr +entry: + %s0s = zext <4 x i32> %s0 to <4 x i64> + %s1s = lshr <4 x i64> %s1, + %m = mul <4 x i64> %s1s, %s0s + %s = lshr <4 x i64> %m, + %s2 = trunc <4 x i64> %s to <4 x i32> + ret <4 x i32> %s2 +} + +define arm_aapcs_vfpcc <8 x i16> @vmulhs_kb_v8i16(<8 x i16> %s0, <8 x i32> %s1) { +; CHECK-LABEL: vmulhs_kb_v8i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmovlt.s16 q4, q0 +; CHECK-NEXT: vmov.f32 s13, s7 +; CHECK-NEXT: vmovlb.s16 q0, q0 +; CHECK-NEXT: vmov.f32 s5, s6 +; CHECK-NEXT: vmov.f32 s14, s9 +; CHECK-NEXT: vmov.f32 s15, s11 +; CHECK-NEXT: vmov.f32 s6, s8 +; CHECK-NEXT: vshr.s32 q3, q3, #16 +; CHECK-NEXT: vmov.f32 s7, s10 +; CHECK-NEXT: vmul.i32 q3, q4, q3 +; CHECK-NEXT: vshr.s32 q1, q1, #16 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmul.i32 
q0, q0, q1 +; CHECK-NEXT: vshr.u32 q0, q0, #16 +; CHECK-NEXT: vmovnt.i32 q0, q3 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: + %s0s = sext <8 x i16> %s0 to <8 x i32> + %s1s = ashr <8 x i32> %s1, + %m = mul <8 x i32> %s0s, %s1s + %s = ashr <8 x i32> %m, + %s2 = trunc <8 x i32> %s to <8 x i16> + ret <8 x i16> %s2 +} + +define arm_aapcs_vfpcc <8 x i16> @vmulhu_kb_v8i16(<8 x i16> %s0, <8 x i32> %s1) { +; CHECK-LABEL: vmulhu_kb_v8i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmovlt.u16 q4, q0 +; CHECK-NEXT: vmov.f32 s13, s7 +; CHECK-NEXT: vmovlb.u16 q0, q0 +; CHECK-NEXT: vmov.f32 s5, s6 +; CHECK-NEXT: vmov.f32 s14, s9 +; CHECK-NEXT: vmov.f32 s15, s11 +; CHECK-NEXT: vmov.f32 s6, s8 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmov.f32 s7, s10 +; CHECK-NEXT: vmul.i32 q3, q4, q3 +; CHECK-NEXT: vshr.u32 q1, q1, #16 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmul.i32 q0, q0, q1 +; CHECK-NEXT: vshr.u32 q0, q0, #16 +; CHECK-NEXT: vmovnt.i32 q0, q3 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: + %s0s = zext <8 x i16> %s0 to <8 x i32> + %s1s = lshr <8 x i32> %s1, + %m = mul <8 x i32> %s0s, %s1s + %s = lshr <8 x i32> %m, + %s2 = trunc <8 x i32> %s to <8 x i16> + ret <8 x i16> %s2 +} + +define arm_aapcs_vfpcc <8 x i16> @vmulhs_kbc_v8i16(<8 x i16> %s0, <8 x i32> %s1) { +; CHECK-LABEL: vmulhs_kbc_v8i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmovlt.s16 q4, q0 +; CHECK-NEXT: vmov.f32 s13, s7 +; CHECK-NEXT: vmovlb.s16 q0, q0 +; CHECK-NEXT: vmov.f32 s5, s6 +; CHECK-NEXT: vmov.f32 s14, s9 +; CHECK-NEXT: vmov.f32 s15, s11 +; CHECK-NEXT: vmov.f32 s6, s8 +; CHECK-NEXT: vshr.s32 q3, q3, #16 +; CHECK-NEXT: vmov.f32 s7, s10 +; CHECK-NEXT: vmul.i32 q3, q3, q4 +; CHECK-NEXT: vshr.s32 q1, q1, #16 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmul.i32 q0, q1, q0 +; 
CHECK-NEXT: vshr.u32 q0, q0, #16 +; CHECK-NEXT: vmovnt.i32 q0, q3 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: + %s0s = sext <8 x i16> %s0 to <8 x i32> + %s1s = ashr <8 x i32> %s1, + %m = mul <8 x i32> %s1s, %s0s + %s = ashr <8 x i32> %m, + %s2 = trunc <8 x i32> %s to <8 x i16> + ret <8 x i16> %s2 +} + +define arm_aapcs_vfpcc <8 x i16> @vmulhu_kbc_v8i16(<8 x i16> %s0, <8 x i32> %s1) { +; CHECK-LABEL: vmulhu_kbc_v8i16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmovlt.u16 q4, q0 +; CHECK-NEXT: vmov.f32 s13, s7 +; CHECK-NEXT: vmovlb.u16 q0, q0 +; CHECK-NEXT: vmov.f32 s5, s6 +; CHECK-NEXT: vmov.f32 s14, s9 +; CHECK-NEXT: vmov.f32 s15, s11 +; CHECK-NEXT: vmov.f32 s6, s8 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmov.f32 s7, s10 +; CHECK-NEXT: vmul.i32 q3, q3, q4 +; CHECK-NEXT: vshr.u32 q1, q1, #16 +; CHECK-NEXT: vshr.u32 q3, q3, #16 +; CHECK-NEXT: vmul.i32 q0, q1, q0 +; CHECK-NEXT: vshr.u32 q0, q0, #16 +; CHECK-NEXT: vmovnt.i32 q0, q3 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: bx lr +entry: + %s0s = zext <8 x i16> %s0 to <8 x i32> + %s1s = lshr <8 x i32> %s1, + %m = mul <8 x i32> %s1s, %s0s + %s = lshr <8 x i32> %m, + %s2 = trunc <8 x i32> %s to <8 x i16> + ret <8 x i16> %s2 +} diff --git a/llvm/test/CodeGen/X86/atomic-bit-test.ll b/llvm/test/CodeGen/X86/atomic-bit-test.ll index 8f91f4120842b..b06bef44a5e9e 100644 --- a/llvm/test/CodeGen/X86/atomic-bit-test.ll +++ b/llvm/test/CodeGen/X86/atomic-bit-test.ll @@ -469,52 +469,56 @@ entry: define i16 @use_in_diff_bb() nounwind { ; X86-LABEL: use_in_diff_bb: ; X86: # %bb.0: # %entry -; X86-NEXT: pushl %esi -; X86-NEXT: movzwl v16, %esi +; X86-NEXT: movzwl v16, %eax ; X86-NEXT: .p2align 4 ; X86-NEXT: .LBB17_1: # %atomicrmw.start ; X86-NEXT: # =>This Inner Loop Header: Depth=1 -; X86-NEXT: movl %esi, %ecx +; X86-NEXT: movl %eax, %ecx ; X86-NEXT: orl $1, %ecx -; X86-NEXT: movl %esi, %eax +; X86-NEXT: # kill: def 
$ax killed $ax killed $eax ; X86-NEXT: lock cmpxchgw %cx, v16 -; X86-NEXT: movl %eax, %esi +; X86-NEXT: # kill: def $ax killed $ax def $eax ; X86-NEXT: jne .LBB17_1 ; X86-NEXT: # %bb.2: # %atomicrmw.end -; X86-NEXT: xorl %eax, %eax -; X86-NEXT: testb %al, %al +; X86-NEXT: xorl %ecx, %ecx +; X86-NEXT: testb %cl, %cl ; X86-NEXT: jne .LBB17_4 ; X86-NEXT: # %bb.3: +; X86-NEXT: pushl %esi +; X86-NEXT: movl %eax, %esi ; X86-NEXT: calll foo@PLT -; X86-NEXT: .LBB17_4: -; X86-NEXT: andl $1, %esi ; X86-NEXT: movl %esi, %eax ; X86-NEXT: popl %esi +; X86-NEXT: .LBB17_4: +; X86-NEXT: andl $1, %eax +; X86-NEXT: # kill: def $ax killed $ax killed $eax ; X86-NEXT: retl ; ; X64-LABEL: use_in_diff_bb: ; X64: # %bb.0: # %entry -; X64-NEXT: pushq %rbx -; X64-NEXT: movzwl v16(%rip), %ebx +; X64-NEXT: movzwl v16(%rip), %eax ; X64-NEXT: .p2align 4 ; X64-NEXT: .LBB17_1: # %atomicrmw.start ; X64-NEXT: # =>This Inner Loop Header: Depth=1 -; X64-NEXT: movl %ebx, %ecx +; X64-NEXT: movl %eax, %ecx ; X64-NEXT: orl $1, %ecx -; X64-NEXT: movl %ebx, %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: lock cmpxchgw %cx, v16(%rip) -; X64-NEXT: movl %eax, %ebx +; X64-NEXT: # kill: def $ax killed $ax def $eax ; X64-NEXT: jne .LBB17_1 ; X64-NEXT: # %bb.2: # %atomicrmw.end -; X64-NEXT: xorl %eax, %eax -; X64-NEXT: testb %al, %al +; X64-NEXT: xorl %ecx, %ecx +; X64-NEXT: testb %cl, %cl ; X64-NEXT: jne .LBB17_4 ; X64-NEXT: # %bb.3: +; X64-NEXT: pushq %rbx +; X64-NEXT: movl %eax, %ebx ; X64-NEXT: callq foo@PLT -; X64-NEXT: .LBB17_4: -; X64-NEXT: andl $1, %ebx ; X64-NEXT: movl %ebx, %eax ; X64-NEXT: popq %rbx +; X64-NEXT: .LBB17_4: +; X64-NEXT: andl $1, %eax +; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq entry: %0 = atomicrmw or ptr @v16, i16 1 monotonic, align 2 diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll index 8aa898f3ec576..da0cef0e4e99b 100644 --- a/llvm/test/CodeGen/X86/avx512-mask-op.ll +++ 
b/llvm/test/CodeGen/X86/avx512-mask-op.ll @@ -2119,8 +2119,7 @@ define void @ktest_1(<8 x double> %in, ptr %base) { ; KNL-LABEL: ktest_1: ; KNL: ## %bb.0: ; KNL-NEXT: vcmpgtpd (%rdi), %zmm0, %k1 -; KNL-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z} -; KNL-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1} +; KNL-NEXT: vcmpltpd 8(%rdi), %zmm0, %k0 {%k1} ; KNL-NEXT: kmovw %k0, %eax ; KNL-NEXT: testb %al, %al ; KNL-NEXT: je LBB44_2 @@ -2152,8 +2151,7 @@ define void @ktest_1(<8 x double> %in, ptr %base) { ; AVX512BW-LABEL: ktest_1: ; AVX512BW: ## %bb.0: ; AVX512BW-NEXT: vcmpgtpd (%rdi), %zmm0, %k1 -; AVX512BW-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z} -; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1} +; AVX512BW-NEXT: vcmpltpd 8(%rdi), %zmm0, %k0 {%k1} ; AVX512BW-NEXT: kmovd %k0, %eax ; AVX512BW-NEXT: testb %al, %al ; AVX512BW-NEXT: je LBB44_2 diff --git a/llvm/test/CodeGen/X86/combine-add.ll b/llvm/test/CodeGen/X86/combine-add.ll index ff9f995c4765b..51a8bf5b48415 100644 --- a/llvm/test/CodeGen/X86/combine-add.ll +++ b/llvm/test/CodeGen/X86/combine-add.ll @@ -235,10 +235,10 @@ define void @PR52039(ptr %pa, ptr %pb) { ; SSE-NEXT: psubd %xmm1, %xmm3 ; SSE-NEXT: psubd %xmm0, %xmm2 ; SSE-NEXT: movdqa %xmm2, %xmm0 -; SSE-NEXT: paddd %xmm2, %xmm0 +; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: paddd %xmm2, %xmm0 ; SSE-NEXT: movdqa %xmm3, %xmm1 -; SSE-NEXT: paddd %xmm3, %xmm1 +; SSE-NEXT: paddd %xmm1, %xmm1 ; SSE-NEXT: paddd %xmm3, %xmm1 ; SSE-NEXT: movdqu %xmm3, 16(%rsi) ; SSE-NEXT: movdqu %xmm2, (%rsi) diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll index 8e4a50ea266c3..ae4d24f91ffc0 100644 --- a/llvm/test/CodeGen/X86/combine-mul.ll +++ b/llvm/test/CodeGen/X86/combine-mul.ll @@ -81,7 +81,7 @@ define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) { ; SSE-LABEL: combine_vec_mul_pow2c: ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm2 -; SSE-NEXT: paddq %xmm0, %xmm2 +; SSE-NEXT: paddq %xmm2, %xmm2 ; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] ; 
SSE-NEXT: movdqa %xmm1, %xmm2 ; SSE-NEXT: psllq $4, %xmm2 diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll index 98187d61c1f84..6bcbfe1808933 100644 --- a/llvm/test/CodeGen/X86/combine-sdiv.ll +++ b/llvm/test/CodeGen/X86/combine-sdiv.ll @@ -2187,13 +2187,13 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE41-NEXT: pxor %xmm0, %xmm0 ; SSE41-NEXT: pxor %xmm3, %xmm3 ; SSE41-NEXT: pcmpgtb %xmm1, %xmm3 -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero ; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] ; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [256,2,2,2,2,128,2,128] ; SSE41-NEXT: psrlw $8, %xmm3 -; SSE41-NEXT: paddw %xmm4, %xmm4 -; SSE41-NEXT: pmovsxbw %xmm1, %xmm2 -; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4,5],xmm4[6],xmm2[7] +; SSE41-NEXT: pmovsxbw %xmm1, %xmm0 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; SSE41-NEXT: paddw %xmm2, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2],xmm0[3,4,5],xmm2[6],xmm0[7] ; SSE41-NEXT: psrlw $8, %xmm2 ; SSE41-NEXT: packuswb %xmm3, %xmm2 ; SSE41-NEXT: paddb %xmm1, %xmm2 @@ -2201,15 +2201,14 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] ; SSE41-NEXT: psraw $8, %xmm0 ; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: paddw %xmm0, %xmm3 -; SSE41-NEXT: psllw $7, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5],xmm0[6],xmm3[7] -; SSE41-NEXT: psrlw $8, %xmm0 +; SSE41-NEXT: psllw $7, %xmm3 
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm0[5],xmm3[6],xmm0[7] +; SSE41-NEXT: psrlw $8, %xmm3 ; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE41-NEXT: psraw $8, %xmm2 ; SSE41-NEXT: psllw $7, %xmm2 ; SSE41-NEXT: psrlw $8, %xmm2 -; SSE41-NEXT: packuswb %xmm0, %xmm2 +; SSE41-NEXT: packuswb %xmm3, %xmm2 ; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255] ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,255,255,255,0,255,255,0,0,0,0,255,0,255] @@ -2225,18 +2224,17 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [256,2,2,2,2,128,2,128] ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3,4,5],xmm2[6],xmm3[7] +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5],xmm3[6],xmm2[7] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 ; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm1 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 -; AVX1-NEXT: vpsllw $7, %xmm2, %xmm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5],xmm2[6],xmm3[7] +; 
AVX1-NEXT: vpsllw $7, %xmm2, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5],xmm3[6],xmm2[7] ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll index 54d82b0c1c929..c66473e9edd19 100644 --- a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll +++ b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll @@ -1756,263 +1756,131 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind ; ; AVX512-LABEL: test_fmaximumnum_v4f16: ; AVX512: # %bb.0: -; AVX512-NEXT: subq $56, %rsp -; AVX512-NEXT: vmovdqa %xmm1, %xmm5 -; AVX512-NEXT: vmovdqa %xmm0, %xmm6 -; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm0 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vpsrldq {{.*#+}} xmm1 = xmm6[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm1 -; AVX512-NEXT: seta %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm2 -; AVX512-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: 
vpshufd {{.*#+}} xmm0 = xmm5[3,3,3,3] -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm0 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm6[3,3,3,3] -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm1 -; AVX512-NEXT: seta %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; AVX512-NEXT: vpsrldq {{.*#+}} xmm1 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vpsrldq {{.*#+}} xmm2 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero -; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512-NEXT: vucomiss %xmm2, %xmm2 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: 
vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm2 -; AVX512-NEXT: seta %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm3 -; AVX512-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vshufpd {{.*#+}} xmm1 = xmm5[1,0] -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vshufpd {{.*#+}} xmm2 = xmm6[1,0] +; AVX512-NEXT: vpsrldq {{.*#+}} xmm2 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512-NEXT: vucomiss %xmm2, %xmm2 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm13 -; AVX512-NEXT: vcvtph2ps %xmm13, %xmm2 -; AVX512-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm12 -; AVX512-NEXT: vcvtph2ps %xmm12, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm2 -; AVX512-NEXT: seta %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; AVX512-NEXT: vpsrlq $48, %xmm5, %xmm0 -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm0 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vpsrlq $48, %xmm6, %xmm1 -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm11 -; 
AVX512-NEXT: vcvtph2ps %xmm11, %xmm1 -; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm15 -; AVX512-NEXT: vcvtph2ps %xmm15, %xmm7 -; AVX512-NEXT: vucomiss %xmm7, %xmm1 -; AVX512-NEXT: seta %al +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm1, %xmm7, %xmm7 {%k1} -; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm5[1,1,3,3] -; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm0 -; AVX512-NEXT: setp %al +; AVX512-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: vmovdqa %xmm2, %xmm4 +; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1} +; AVX512-NEXT: vmaxss %xmm4, %xmm3, %xmm2 +; AVX512-NEXT: vcmpordss %xmm3, %xmm3, %k1 +; AVX512-NEXT: vmovss %xmm3, %xmm2, %xmm2 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: vshufps {{.*#+}} xmm3 = xmm0[3,3,3,3] +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm3, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm6[1,1,3,3] -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm9 -; AVX512-NEXT: vcvtph2ps %xmm9, %xmm4 -; AVX512-NEXT: vmovss %xmm4, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm10 -; AVX512-NEXT: vcvtph2ps %xmm10, %xmm3 -; AVX512-NEXT: vucomiss %xmm3, %xmm4 -; AVX512-NEXT: seta %al +; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[3,3,3,3] +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovdqa %xmm3, %xmm5 +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} 
+; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3 +; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1 +; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; AVX512-NEXT: vpsrldq {{.*#+}} xmm3 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm3, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 +; AVX512-NEXT: vpsrldq {{.*#+}} xmm4 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovdqa %xmm3, %xmm5 +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3 +; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1 ; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1} -; AVX512-NEXT: vcvtph2ps %xmm5, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm0 -; AVX512-NEXT: setp %al +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; AVX512-NEXT: vshufpd {{.*#+}} xmm4 = xmm0[1,0] +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vcvtph2ps %xmm6, %xmm4 -; AVX512-NEXT: vucomiss %xmm4, %xmm4 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm0, %xmm4, %xmm4 {%k2} +; AVX512-NEXT: vshufpd {{.*#+}} xmm5 = xmm1[1,0] +; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5 +; AVX512-NEXT: vmovdqa %xmm4, %xmm6 +; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1} +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4 +; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1 +; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1} ; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 -; AVX512-NEXT: vcvtph2ps %xmm4, %xmm1 -; AVX512-NEXT: vmovss %xmm1, 
%xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm8 -; AVX512-NEXT: vcvtph2ps %xmm8, %xmm2 -; AVX512-NEXT: vucomiss %xmm2, %xmm1 -; AVX512-NEXT: seta %al +; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; AVX512-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; AVX512-NEXT: vpsrlq $48, %xmm0, %xmm3 +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm3, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1} -; AVX512-NEXT: vpsrld $16, %xmm5, %xmm1 -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512-NEXT: vucomiss %xmm1, %xmm1 -; AVX512-NEXT: setp %al +; AVX512-NEXT: vpsrlq $48, %xmm1, %xmm4 +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovdqa %xmm3, %xmm5 +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3 +; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1 +; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3] +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vpsrld $16, %xmm6, %xmm5 +; AVX512-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3] ; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX512-NEXT: vucomiss %xmm5, %xmm5 -; AVX512-NEXT: setp %al -; AVX512-NEXT: kmovw %eax, %k2 -; AVX512-NEXT: vmovss %xmm1, %xmm5, %xmm5 {%k2} -; AVX512-NEXT: vcvtps2ph $4, %xmm5, %xmm6 -; AVX512-NEXT: vcvtph2ps %xmm6, %xmm5 -; AVX512-NEXT: vmovss %xmm5, %xmm1, %xmm1 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512-NEXT: vcvtph2ps %xmm1, %xmm0 -; AVX512-NEXT: vucomiss %xmm0, %xmm5 -; AVX512-NEXT: seta %al +; AVX512-NEXT: vmovdqa %xmm4, %xmm6 +; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1} +; 
AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4 +; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1 +; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; AVX512-NEXT: vcvtph2ps %xmm0, %xmm4 +; AVX512-NEXT: vmovd %xmm4, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al ; AVX512-NEXT: kmovw %eax, %k1 -; AVX512-NEXT: vmovss %xmm5, %xmm0, %xmm0 {%k1} -; AVX512-NEXT: vcvtps2ph $4, %xmm7, %xmm7 -; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3 -; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm5 -; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm2 -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3] -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm14 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm14[0],xmm0[0],xmm14[1],xmm0[1] -; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload -; AVX512-NEXT: # xmm0 = xmm0[0],mem[0] -; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload -; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload -; AVX512-NEXT: # xmm14 = xmm14[0],mem[0],xmm14[1],mem[1],xmm14[2],mem[2],xmm14[3],mem[3] -; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload -; AVX512-NEXT: # xmm13 = xmm13[0],mem[0],xmm13[1],mem[1],xmm13[2],mem[2],xmm13[3],mem[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm13 = xmm13[0],xmm14[0],xmm13[1],xmm14[1] -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3] -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1] -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm4 = 
xmm4[0],xmm13[0] -; AVX512-NEXT: vpxor %xmm6, %xmm6, %xmm6 -; AVX512-NEXT: vpcmpeqw %xmm6, %xmm4, %xmm9 -; AVX512-NEXT: vpblendvb %xmm9, %xmm4, %xmm0, %xmm4 -; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload -; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload -; AVX512-NEXT: # xmm9 = xmm9[0],mem[0],xmm9[1],mem[1],xmm9[2],mem[2],xmm9[3],mem[3] -; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm11 # 16-byte Folded Reload -; AVX512-NEXT: # xmm11 = xmm12[0],mem[0],xmm12[1],mem[1],xmm12[2],mem[2],xmm12[3],mem[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm9 = xmm11[0],xmm9[0],xmm11[1],xmm9[1] -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3] -; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3] -; AVX512-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1] -; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm9[0] -; AVX512-NEXT: vpcmpeqw %xmm6, %xmm1, %xmm6 -; AVX512-NEXT: vpblendvb %xmm6, %xmm1, %xmm4, %xmm1 -; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512-NEXT: xorl %eax, %eax -; AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4 -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $65535, %ecx # imm = 0xFFFF -; AVX512-NEXT: movl $0, %edx -; AVX512-NEXT: cmovel %ecx, %edx -; AVX512-NEXT: vcvtph2ps %xmm5, %xmm2 -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %esi -; AVX512-NEXT: cmovel %ecx, %esi -; AVX512-NEXT: vcvtph2ps %xmm3, %xmm2 -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %edi -; AVX512-NEXT: cmovel %ecx, %edi -; AVX512-NEXT: vcvtph2ps %xmm7, %xmm2 -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %r8d -; AVX512-NEXT: cmovel %ecx, %r8d -; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %r9d -; AVX512-NEXT: cmovel %ecx, %r9d -; 
AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %r10d -; AVX512-NEXT: cmovel %ecx, %r10d -; AVX512-NEXT: vcvtph2ps (%rsp), %xmm2 # 16-byte Folded Reload -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: movl $0, %r11d -; AVX512-NEXT: cmovel %ecx, %r11d -; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload -; AVX512-NEXT: vucomiss %xmm4, %xmm2 -; AVX512-NEXT: vmovd %esi, %xmm2 -; AVX512-NEXT: vpinsrw $1, %edx, %xmm2, %xmm2 -; AVX512-NEXT: vpinsrw $2, %edi, %xmm2, %xmm2 -; AVX512-NEXT: vpinsrw $3, %r8d, %xmm2, %xmm2 -; AVX512-NEXT: vpinsrw $4, %r9d, %xmm2, %xmm2 -; AVX512-NEXT: vpinsrw $5, %r10d, %xmm2, %xmm2 -; AVX512-NEXT: vpinsrw $6, %r11d, %xmm2, %xmm2 -; AVX512-NEXT: cmovel %ecx, %eax -; AVX512-NEXT: vpinsrw $7, %eax, %xmm2, %xmm2 -; AVX512-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: addq $56, %rsp +; AVX512-NEXT: vcvtph2ps %xmm1, %xmm5 +; AVX512-NEXT: vmovdqa %xmm4, %xmm6 +; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1} +; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4 +; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1 +; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: testl %eax, %eax +; AVX512-NEXT: sets %al +; AVX512-NEXT: kmovw %eax, %k1 +; AVX512-NEXT: vpsrld $16, %xmm1, %xmm1 +; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 +; AVX512-NEXT: vmovdqa %xmm0, %xmm5 +; AVX512-NEXT: vmovss %xmm1, %xmm5, %xmm5 {%k1} +; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1} +; AVX512-NEXT: vmaxss %xmm5, %xmm1, %xmm0 +; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1 +; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = 
xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] +; AVX512-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] ; AVX512-NEXT: retq ; ; AVX10_2-LABEL: test_fmaximumnum_v4f16: diff --git a/llvm/test/CodeGen/X86/ifma-combine-vpmadd52.ll b/llvm/test/CodeGen/X86/ifma-combine-vpmadd52.ll index aebfc7d483d6f..3ece4beb9c22e 100644 --- a/llvm/test/CodeGen/X86/ifma-combine-vpmadd52.ll +++ b/llvm/test/CodeGen/X86/ifma-combine-vpmadd52.ll @@ -1,25 +1,26 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=x86_64-- -mattr=+avxifma | FileCheck %s --check-prefixes=X64,AVX +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avxifma | FileCheck %s --check-prefixes=X64,AVX,AVXIFMA ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512ifma | FileCheck %s --check-prefixes=X64,AVX512,AVX512-NOVL ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512ifma,+avx512vl | FileCheck %s --check-prefixes=X64,AVX512,AVX512VL +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avxifma,+avx512vl | FileCheck %s --check-prefixes=X64,AVX,AVX512-NOIFMA ; 67108863 == (1 << 26) - 1 ; 4503599627370496 == (1 << 52) ; 4503599627370495 == (1 << 52) - 1 define <8 x i64> @test_512_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { -; AVX-LABEL: test_512_combine: -; AVX: # %bb.0: -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm6, %ymm2, %ymm2 -; AVX-NEXT: vpand %ymm6, %ymm0, %ymm0 -; AVX-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4 -; AVX-NEXT: vpand %ymm6, %ymm3, %ymm0 -; AVX-NEXT: vpand %ymm6, %ymm1, %ymm1 -; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm1, %ymm5 -; AVX-NEXT: vmovdqa %ymm4, %ymm0 -; AVX-NEXT: vmovdqa %ymm5, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_512_combine: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm6 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm6, %ymm2, %ymm2 +; 
AVXIFMA-NEXT: vpand %ymm6, %ymm0, %ymm0 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4 +; AVXIFMA-NEXT: vpand %ymm6, %ymm3, %ymm0 +; AVXIFMA-NEXT: vpand %ymm6, %ymm1, %ymm1 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm1, %ymm5 +; AVXIFMA-NEXT: vmovdqa %ymm4, %ymm0 +; AVXIFMA-NEXT: vmovdqa %ymm5, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_512_combine: ; AVX512: # %bb.0: @@ -29,6 +30,19 @@ define <8 x i64> @test_512_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { ; AVX512-NEXT: vpmadd52luq %zmm1, %zmm0, %zmm2 ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_512_combine: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpbroadcastq {{.*#+}} zmm3 = [67108863,67108863,67108863,67108863,67108863,67108863,67108863,67108863] +; AVX512-NOIFMA-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpandq %zmm3, %zmm1, %zmm1 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm1, %ymm3 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm0, %ymm4 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm2, %ymm5 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm3, %ymm4, %ymm5 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm1, %ymm0, %ymm2 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <8 x i64> %x, splat (i64 67108863) %y_masked = and <8 x i64> %y, splat (i64 67108863) %mul = mul nuw nsw <8 x i64> %x_masked, %y_masked @@ -37,19 +51,19 @@ define <8 x i64> @test_512_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { } define <8 x i64> @test_512_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { -; AVX-LABEL: test_512_combine_v2: -; AVX: # %bb.0: -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [3,3,3,3] -; AVX-NEXT: vpand %ymm6, %ymm2, %ymm2 -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm7 = [1125899906842623,1125899906842623,1125899906842623,1125899906842623] -; AVX-NEXT: vpand %ymm7, %ymm0, %ymm0 -; AVX-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4 -; AVX-NEXT: vpand %ymm6, %ymm3, %ymm0 -; AVX-NEXT: vpand 
%ymm7, %ymm1, %ymm1 -; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm1, %ymm5 -; AVX-NEXT: vmovdqa %ymm4, %ymm0 -; AVX-NEXT: vmovdqa %ymm5, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_512_combine_v2: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm6 = [3,3,3,3] +; AVXIFMA-NEXT: vpand %ymm6, %ymm2, %ymm2 +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm7 = [1125899906842623,1125899906842623,1125899906842623,1125899906842623] +; AVXIFMA-NEXT: vpand %ymm7, %ymm0, %ymm0 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4 +; AVXIFMA-NEXT: vpand %ymm6, %ymm3, %ymm0 +; AVXIFMA-NEXT: vpand %ymm7, %ymm1, %ymm1 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm1, %ymm5 +; AVXIFMA-NEXT: vmovdqa %ymm4, %ymm0 +; AVXIFMA-NEXT: vmovdqa %ymm5, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_512_combine_v2: ; AVX512: # %bb.0: @@ -58,6 +72,18 @@ define <8 x i64> @test_512_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) ; AVX512-NEXT: vpmadd52luq %zmm1, %zmm0, %zmm2 ; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_512_combine_v2: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm2, %ymm3 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm1, %ymm4 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm0, %ymm5 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm4, %ymm5, %ymm3 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm1, %ymm0, %ymm2 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <8 x i64> %x, splat (i64 1125899906842623) ; (1 << 50) - 1 %y_masked = and <8 x i64> %y, splat (i64 3) %mul = mul nuw nsw <8 x i64> %x_masked, %y_masked @@ -66,32 +92,32 @@ define <8 x i64> @test_512_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) } define <8 x i64> @test_512_no_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { -; 
AVX-LABEL: test_512_no_combine: -; AVX: # %bb.0: -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495] -; AVX-NEXT: vpand %ymm6, %ymm0, %ymm7 -; AVX-NEXT: vpand %ymm6, %ymm1, %ymm8 -; AVX-NEXT: vpand %ymm6, %ymm2, %ymm9 -; AVX-NEXT: vpand %ymm6, %ymm3, %ymm6 -; AVX-NEXT: vpsrlq $32, %ymm8, %ymm8 -; AVX-NEXT: vpmuludq %ymm3, %ymm8, %ymm8 -; AVX-NEXT: vpsrlq $32, %ymm6, %ymm6 -; AVX-NEXT: vpmuludq %ymm6, %ymm1, %ymm6 -; AVX-NEXT: vpaddq %ymm6, %ymm8, %ymm6 -; AVX-NEXT: vpsllq $32, %ymm6, %ymm6 -; AVX-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 -; AVX-NEXT: vpsrlq $32, %ymm7, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 -; AVX-NEXT: vpsrlq $32, %ymm9, %ymm7 -; AVX-NEXT: vpmuludq %ymm7, %ymm0, %ymm7 -; AVX-NEXT: vpaddq %ymm3, %ymm7, %ymm3 -; AVX-NEXT: vpsllq $32, %ymm3, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm4, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm3, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm5, %ymm1, %ymm1 -; AVX-NEXT: vpaddq %ymm6, %ymm1, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_512_no_combine: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm6 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495] +; AVXIFMA-NEXT: vpand %ymm6, %ymm0, %ymm7 +; AVXIFMA-NEXT: vpand %ymm6, %ymm1, %ymm8 +; AVXIFMA-NEXT: vpand %ymm6, %ymm2, %ymm9 +; AVXIFMA-NEXT: vpand %ymm6, %ymm3, %ymm6 +; AVXIFMA-NEXT: vpsrlq $32, %ymm8, %ymm8 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm8, %ymm8 +; AVXIFMA-NEXT: vpsrlq $32, %ymm6, %ymm6 +; AVXIFMA-NEXT: vpmuludq %ymm6, %ymm1, %ymm6 +; AVXIFMA-NEXT: vpaddq %ymm6, %ymm8, %ymm6 +; AVXIFMA-NEXT: vpsllq $32, %ymm6, %ymm6 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 +; AVXIFMA-NEXT: vpsrlq $32, %ymm7, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpsrlq $32, %ymm9, %ymm7 +; AVXIFMA-NEXT: vpmuludq %ymm7, %ymm0, %ymm7 +; AVXIFMA-NEXT: vpaddq %ymm3, %ymm7, %ymm3 +; AVXIFMA-NEXT: vpsllq $32, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpmuludq 
%ymm2, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm4, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm3, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm5, %ymm1, %ymm1 +; AVXIFMA-NEXT: vpaddq %ymm6, %ymm1, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_512_no_combine: ; AVX512: # %bb.0: @@ -108,6 +134,22 @@ define <8 x i64> @test_512_no_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) ; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; AVX512-NEXT: vpaddq %zmm3, %zmm0, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_512_no_combine: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpbroadcastq {{.*#+}} zmm3 = [4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495,4503599627370495] +; AVX512-NOIFMA-NEXT: vpandq %zmm3, %zmm0, %zmm4 +; AVX512-NOIFMA-NEXT: vpandq %zmm3, %zmm1, %zmm3 +; AVX512-NOIFMA-NEXT: vpsrlq $32, %zmm4, %zmm4 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm4, %zmm4 +; AVX512-NOIFMA-NEXT: vpsrlq $32, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm3, %zmm0, %zmm3 +; AVX512-NOIFMA-NEXT: vpaddq %zmm4, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpsllq $32, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm3, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <8 x i64> %x, splat (i64 4503599627370495) %y_masked = and <8 x i64> %y, splat (i64 4503599627370495) %mul = mul nuw nsw <8 x i64> %x_masked, %y_masked @@ -116,27 +158,27 @@ define <8 x i64> @test_512_no_combine(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) } define <8 x i64> @test_512_no_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { -; AVX-LABEL: test_512_no_combine_v2: -; AVX: # %bb.0: -; AVX-NEXT: vpsrlq $32, %ymm1, %ymm6 -; AVX-NEXT: vpmuludq %ymm3, %ymm6, %ymm6 -; AVX-NEXT: vpsrlq $32, %ymm3, %ymm7 -; AVX-NEXT: vpmuludq %ymm7, %ymm1, %ymm7 -; AVX-NEXT: vpaddq %ymm6, %ymm7, %ymm6 -; AVX-NEXT: vpsllq $32, %ymm6, %ymm6 -; AVX-NEXT: vpmuludq 
%ymm3, %ymm1, %ymm1 -; AVX-NEXT: vpsrlq $32, %ymm0, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 -; AVX-NEXT: vpsrlq $32, %ymm2, %ymm7 -; AVX-NEXT: vpmuludq %ymm7, %ymm0, %ymm7 -; AVX-NEXT: vpaddq %ymm3, %ymm7, %ymm3 -; AVX-NEXT: vpsllq $32, %ymm3, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm4, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm3, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm5, %ymm1, %ymm1 -; AVX-NEXT: vpaddq %ymm6, %ymm1, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_512_no_combine_v2: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpsrlq $32, %ymm1, %ymm6 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm6, %ymm6 +; AVXIFMA-NEXT: vpsrlq $32, %ymm3, %ymm7 +; AVXIFMA-NEXT: vpmuludq %ymm7, %ymm1, %ymm7 +; AVXIFMA-NEXT: vpaddq %ymm6, %ymm7, %ymm6 +; AVXIFMA-NEXT: vpsllq $32, %ymm6, %ymm6 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 +; AVXIFMA-NEXT: vpsrlq $32, %ymm0, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpsrlq $32, %ymm2, %ymm7 +; AVXIFMA-NEXT: vpmuludq %ymm7, %ymm0, %ymm7 +; AVXIFMA-NEXT: vpaddq %ymm3, %ymm7, %ymm3 +; AVXIFMA-NEXT: vpsllq $32, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm4, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm3, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm5, %ymm1, %ymm1 +; AVXIFMA-NEXT: vpaddq %ymm6, %ymm1, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_512_no_combine_v2: ; AVX512: # %bb.0: @@ -150,6 +192,19 @@ define <8 x i64> @test_512_no_combine_v2(<8 x i64> %x, <8 x i64> %y, <8 x i64> % ; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; AVX512-NEXT: vpaddq %zmm3, %zmm0, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_512_no_combine_v2: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpsrlq $32, %zmm0, %zmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpsrlq $32, %zmm1, %zmm4 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm4, %zmm0, %zmm4 +; AVX512-NOIFMA-NEXT: vpaddq %zmm3, %zmm4, %zmm3 +; AVX512-NOIFMA-NEXT: vpsllq $32, %zmm3, %zmm3 
+; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm3, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: retq %mul = mul <8 x i64> %x, %y %res = add <8 x i64> %mul, %z ret <8 x i64> %res @@ -255,25 +310,25 @@ define <1 x i64> @test_scalar_no_ifma(<1 x i64> %x, <1 x i64> %y, <1 x i64> %z) ; 40-bit and 13-bit, too wide define <8 x i64> @test_mixed_width_too_wide(<8 x i64> %x, <8 x i64> %y, <8 x i64> %z) { -; AVX-LABEL: test_mixed_width_too_wide: -; AVX: # %bb.0: -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm6 = [8191,8191,8191,8191] -; AVX-NEXT: vpand %ymm6, %ymm2, %ymm2 -; AVX-NEXT: vpand %ymm6, %ymm3, %ymm3 -; AVX-NEXT: vpmovzxdq {{.*#+}} ymm6 = [2155905028,2155905036,2155905044,2155905052] -; AVX-NEXT: vpshufb %ymm6, %ymm1, %ymm7 -; AVX-NEXT: vpmuludq %ymm3, %ymm7, %ymm7 -; AVX-NEXT: vpsllq $32, %ymm7, %ymm7 -; AVX-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 -; AVX-NEXT: vpshufb %ymm6, %ymm0, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 -; AVX-NEXT: vpsllq $32, %ymm3, %ymm3 -; AVX-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm0, %ymm4, %ymm0 -; AVX-NEXT: vpaddq %ymm3, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm1, %ymm5, %ymm1 -; AVX-NEXT: vpaddq %ymm7, %ymm1, %ymm1 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_mixed_width_too_wide: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm6 = [8191,8191,8191,8191] +; AVXIFMA-NEXT: vpand %ymm6, %ymm2, %ymm2 +; AVXIFMA-NEXT: vpand %ymm6, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpmovzxdq {{.*#+}} ymm6 = [2155905028,2155905036,2155905044,2155905052] +; AVXIFMA-NEXT: vpshufb %ymm6, %ymm1, %ymm7 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm7, %ymm7 +; AVXIFMA-NEXT: vpsllq $32, %ymm7, %ymm7 +; AVXIFMA-NEXT: vpmuludq %ymm3, %ymm1, %ymm1 +; AVXIFMA-NEXT: vpshufb %ymm6, %ymm0, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpsllq $32, %ymm3, %ymm3 +; AVXIFMA-NEXT: vpmuludq %ymm2, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm0, %ymm4, %ymm0 +; AVXIFMA-NEXT: 
vpaddq %ymm3, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm1, %ymm5, %ymm1 +; AVXIFMA-NEXT: vpaddq %ymm7, %ymm1, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_mixed_width_too_wide: ; AVX512: # %bb.0: @@ -286,6 +341,18 @@ define <8 x i64> @test_mixed_width_too_wide(<8 x i64> %x, <8 x i64> %y, <8 x i64 ; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm1 ; AVX512-NEXT: vpaddq %zmm0, %zmm1, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_mixed_width_too_wide: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm0, %zmm3 +; AVX512-NOIFMA-NEXT: vpsrlq $32, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpsllq $32, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm3, %zmm2, %zmm1 +; AVX512-NOIFMA-NEXT: vpaddq %zmm0, %zmm1, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x40 = and <8 x i64> %x, splat (i64 1099511627775) %y13 = and <8 x i64> %y, splat (i64 8191) %mul = mul <8 x i64> %x40, %y13 @@ -294,19 +361,19 @@ define <8 x i64> @test_mixed_width_too_wide(<8 x i64> %x, <8 x i64> %y, <8 x i64 } define <8 x i64> @test_zext32_inputs_not_safe(<8 x i32> %xi32, <8 x i32> %yi32, <8 x i64> %z) { -; AVX-LABEL: test_zext32_inputs_not_safe: -; AVX: # %bb.0: -; AVX-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX-NEXT: vpmuludq %ymm5, %ymm4, %ymm4 -; AVX-NEXT: vextracti128 $1, %ymm1, %xmm1 -; AVX-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm1 -; AVX-NEXT: vpaddq %ymm4, %ymm2, %ymm0 -; AVX-NEXT: vpaddq %ymm1, %ymm3, %ymm1 -; AVX-NEXT: retq +; 
AVXIFMA-LABEL: test_zext32_inputs_not_safe: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVXIFMA-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVXIFMA-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVXIFMA-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVXIFMA-NEXT: vpmuludq %ymm5, %ymm4, %ymm4 +; AVXIFMA-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVXIFMA-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVXIFMA-NEXT: vpmuludq %ymm1, %ymm0, %ymm1 +; AVXIFMA-NEXT: vpaddq %ymm4, %ymm2, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm1, %ymm3, %ymm1 +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_zext32_inputs_not_safe: ; AVX512: # %bb.0: @@ -315,6 +382,14 @@ define <8 x i64> @test_zext32_inputs_not_safe(<8 x i32> %xi32, <8 x i32> %yi32, ; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vpaddq %zmm0, %zmm2, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_zext32_inputs_not_safe: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero +; AVX512-NOIFMA-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero +; AVX512-NOIFMA-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm0, %zmm2, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x = zext <8 x i32> %xi32 to <8 x i64> %y = zext <8 x i32> %yi32 to <8 x i64> %mul = mul <8 x i64> %x, %y @@ -323,36 +398,36 @@ define <8 x i64> @test_zext32_inputs_not_safe(<8 x i32> %xi32, <8 x i32> %yi32, } define <16 x i64> @test_1024_combine_split(<16 x i64> %x, <16 x i64> %y, <16 x i64> %z) nounwind { -; AVX-LABEL: test_1024_combine_split: -; AVX: # %bb.0: -; AVX-NEXT: pushq %rbp -; AVX-NEXT: movq %rsp, %rbp -; AVX-NEXT: andq $-32, %rsp -; AVX-NEXT: subq 
$32, %rsp -; AVX-NEXT: vmovdqa 112(%rbp), %ymm8 -; AVX-NEXT: vmovdqa 80(%rbp), %ymm9 -; AVX-NEXT: vmovdqa 48(%rbp), %ymm10 -; AVX-NEXT: vmovdqa 16(%rbp), %ymm11 -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm12 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm3, %ymm12, %ymm3 -; AVX-NEXT: vpand %ymm2, %ymm12, %ymm2 -; AVX-NEXT: vpand %ymm1, %ymm12, %ymm1 -; AVX-NEXT: vpand %ymm0, %ymm12, %ymm0 -; AVX-NEXT: vpand %ymm7, %ymm12, %ymm7 -; AVX-NEXT: {vex} vpmadd52luq %ymm7, %ymm3, %ymm8 -; AVX-NEXT: vpand %ymm6, %ymm12, %ymm3 -; AVX-NEXT: {vex} vpmadd52luq %ymm3, %ymm2, %ymm9 -; AVX-NEXT: vpand %ymm5, %ymm12, %ymm2 -; AVX-NEXT: {vex} vpmadd52luq %ymm2, %ymm1, %ymm10 -; AVX-NEXT: vpand %ymm4, %ymm12, %ymm1 -; AVX-NEXT: {vex} vpmadd52luq %ymm1, %ymm0, %ymm11 -; AVX-NEXT: vmovdqa %ymm11, %ymm0 -; AVX-NEXT: vmovdqa %ymm10, %ymm1 -; AVX-NEXT: vmovdqa %ymm9, %ymm2 -; AVX-NEXT: vmovdqa %ymm8, %ymm3 -; AVX-NEXT: movq %rbp, %rsp -; AVX-NEXT: popq %rbp -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_1024_combine_split: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: pushq %rbp +; AVXIFMA-NEXT: movq %rsp, %rbp +; AVXIFMA-NEXT: andq $-32, %rsp +; AVXIFMA-NEXT: subq $32, %rsp +; AVXIFMA-NEXT: vmovdqa 112(%rbp), %ymm8 +; AVXIFMA-NEXT: vmovdqa 80(%rbp), %ymm9 +; AVXIFMA-NEXT: vmovdqa 48(%rbp), %ymm10 +; AVXIFMA-NEXT: vmovdqa 16(%rbp), %ymm11 +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm12 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm3, %ymm12, %ymm3 +; AVXIFMA-NEXT: vpand %ymm2, %ymm12, %ymm2 +; AVXIFMA-NEXT: vpand %ymm1, %ymm12, %ymm1 +; AVXIFMA-NEXT: vpand %ymm0, %ymm12, %ymm0 +; AVXIFMA-NEXT: vpand %ymm7, %ymm12, %ymm7 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm7, %ymm3, %ymm8 +; AVXIFMA-NEXT: vpand %ymm6, %ymm12, %ymm3 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm3, %ymm2, %ymm9 +; AVXIFMA-NEXT: vpand %ymm5, %ymm12, %ymm2 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm1, %ymm10 +; AVXIFMA-NEXT: vpand %ymm4, %ymm12, %ymm1 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm1, %ymm0, %ymm11 +; 
AVXIFMA-NEXT: vmovdqa %ymm11, %ymm0 +; AVXIFMA-NEXT: vmovdqa %ymm10, %ymm1 +; AVXIFMA-NEXT: vmovdqa %ymm9, %ymm2 +; AVXIFMA-NEXT: vmovdqa %ymm8, %ymm3 +; AVXIFMA-NEXT: movq %rbp, %rsp +; AVXIFMA-NEXT: popq %rbp +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_1024_combine_split: ; AVX512: # %bb.0: @@ -366,6 +441,27 @@ define <16 x i64> @test_1024_combine_split(<16 x i64> %x, <16 x i64> %y, <16 x i ; AVX512-NEXT: vmovdqa64 %zmm4, %zmm0 ; AVX512-NEXT: vmovdqa64 %zmm5, %zmm1 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_1024_combine_split: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpbroadcastq {{.*#+}} zmm6 = [67108863,67108863,67108863,67108863,67108863,67108863,67108863,67108863] +; AVX512-NOIFMA-NEXT: vpandq %zmm6, %zmm1, %zmm1 +; AVX512-NOIFMA-NEXT: vpandq %zmm6, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpandq %zmm6, %zmm3, %zmm3 +; AVX512-NOIFMA-NEXT: vpandq %zmm6, %zmm2, %zmm2 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm2, %ymm6 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm0, %ymm7 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm4, %ymm8 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm6, %ymm7, %ymm8 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm0, %ymm4 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm8, %zmm4, %zmm0 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm3, %ymm2 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm1, %ymm4 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm5, %ymm6 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm4, %ymm6 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm3, %ymm1, %ymm5 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm1 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <16 x i64> %x, splat (i64 67108863) %y_masked = and <16 x i64> %y, splat (i64 67108863) %mul = mul <16 x i64> %x_masked, %y_masked @@ -388,13 +484,13 @@ define <1 x i64> @test_not_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i64> %z) { } define <3 x i64> @test_v3i64(<3 x i64> %x, <3 x i64> %y, <3 x i64> %z) { -; AVX-LABEL: test_v3i64: -; AVX: # %bb.0: -; AVX-NEXT: 
vpbroadcastq {{.*#+}} ymm1 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX-NEXT: vpmuludq %ymm0, %ymm0, %ymm0 -; AVX-NEXT: vpaddq %ymm2, %ymm0, %ymm0 -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_v3i64: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm1 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpmuludq %ymm0, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; AVXIFMA-NEXT: retq ; ; AVX512-NOVL-LABEL: test_v3i64: ; AVX512-NOVL: # %bb.0: @@ -410,6 +506,13 @@ define <3 x i64> @test_v3i64(<3 x i64> %x, <3 x i64> %y, <3 x i64> %z) { ; AVX512VL-NEXT: vpmuludq %ymm0, %ymm0, %ymm0 ; AVX512VL-NEXT: vpaddq %ymm2, %ymm0, %ymm0 ; AVX512VL-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_v3i64: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0 +; AVX512-NOIFMA-NEXT: vpmuludq %ymm0, %ymm0, %ymm0 +; AVX512-NOIFMA-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <3 x i64> %x, splat (i64 67108863) %y_masked = and <3 x i64> %x, splat (i64 67108863) %mul = mul <3 x i64> %x_masked, %y_masked @@ -418,35 +521,35 @@ define <3 x i64> @test_v3i64(<3 x i64> %x, <3 x i64> %y, <3 x i64> %z) { } define <5 x i64> @test_v5i64(<5 x i64> %x, <5 x i64> %y, <5 x i64> %z) { -; AVX-LABEL: test_v5i64: -; AVX: # %bb.0: -; AVX-NEXT: movq %rdi, %rax -; AVX-NEXT: vmovq %r8, %xmm0 -; AVX-NEXT: vmovq %rcx, %xmm1 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX-NEXT: vmovq %rdx, %xmm1 -; AVX-NEXT: vmovq %rsi, %xmm2 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero -; AVX-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm2 -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm3 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm3, %ymm0, %ymm0 -; AVX-NEXT: movl $67108863, %ecx # imm = 0x3FFFFFF -; AVX-NEXT: vmovq %rcx, 
%xmm3 -; AVX-NEXT: vmovq %r9, %xmm4 -; AVX-NEXT: vpand %xmm3, %xmm4, %xmm3 -; AVX-NEXT: vpsrlq $32, %xmm3, %xmm4 -; AVX-NEXT: vpmuludq %xmm4, %xmm3, %xmm4 -; AVX-NEXT: vpsllq $33, %xmm4, %xmm4 -; AVX-NEXT: vpmuludq %xmm3, %xmm3, %xmm3 -; AVX-NEXT: vpaddq %xmm1, %xmm3, %xmm1 -; AVX-NEXT: vpaddq %xmm4, %xmm1, %xmm1 -; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm2 -; AVX-NEXT: vmovdqa %ymm2, (%rdi) -; AVX-NEXT: vmovq %xmm1, 32(%rdi) -; AVX-NEXT: vzeroupper -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_v5i64: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: movq %rdi, %rax +; AVXIFMA-NEXT: vmovq %r8, %xmm0 +; AVXIFMA-NEXT: vmovq %rcx, %xmm1 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVXIFMA-NEXT: vmovq %rdx, %xmm1 +; AVXIFMA-NEXT: vmovq %rsi, %xmm2 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVXIFMA-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVXIFMA-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVXIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm2 +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm3 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm3, %ymm0, %ymm0 +; AVXIFMA-NEXT: movl $67108863, %ecx # imm = 0x3FFFFFF +; AVXIFMA-NEXT: vmovq %rcx, %xmm3 +; AVXIFMA-NEXT: vmovq %r9, %xmm4 +; AVXIFMA-NEXT: vpand %xmm3, %xmm4, %xmm3 +; AVXIFMA-NEXT: vpsrlq $32, %xmm3, %xmm4 +; AVXIFMA-NEXT: vpmuludq %xmm4, %xmm3, %xmm4 +; AVXIFMA-NEXT: vpsllq $33, %xmm4, %xmm4 +; AVXIFMA-NEXT: vpmuludq %xmm3, %xmm3, %xmm3 +; AVXIFMA-NEXT: vpaddq %xmm1, %xmm3, %xmm1 +; AVXIFMA-NEXT: vpaddq %xmm4, %xmm1, %xmm1 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm2 +; AVXIFMA-NEXT: vmovdqa %ymm2, (%rdi) +; AVXIFMA-NEXT: vmovq %xmm1, 32(%rdi) +; AVXIFMA-NEXT: vzeroupper +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_v5i64: ; AVX512: # %bb.0: @@ -454,6 +557,13 @@ define <5 x i64> @test_v5i64(<5 x i64> %x, <5 x i64> %y, <5 x i64> %z) { ; AVX512-NEXT: vpmuludq %zmm0, %zmm0, %zmm0 ; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: 
test_v5i64: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm0, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <5 x i64> %x, splat (i64 67108863) %y_masked = and <5 x i64> %x, splat (i64 67108863) %mul = mul <5 x i64> %x_masked, %y_masked @@ -462,30 +572,30 @@ define <5 x i64> @test_v5i64(<5 x i64> %x, <5 x i64> %y, <5 x i64> %z) { } define <6 x i64> @test_v6i64(<6 x i64> %x, <6 x i64> %y, <6 x i64> %z) { -; AVX-LABEL: test_v6i64: -; AVX: # %bb.0: -; AVX-NEXT: movq %rdi, %rax -; AVX-NEXT: vmovq %r8, %xmm0 -; AVX-NEXT: vmovq %rcx, %xmm1 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX-NEXT: vmovq %rdx, %xmm1 -; AVX-NEXT: vmovq %rsi, %xmm2 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm1 -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm2 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm1 -; AVX-NEXT: vmovq %r9, %xmm0 -; AVX-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] -; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpmuldq %xmm0, %xmm0, %xmm0 -; AVX-NEXT: vpaddq {{[0-9]+}}(%rsp), %xmm0, %xmm0 -; AVX-NEXT: vmovdqa %xmm0, 32(%rdi) -; AVX-NEXT: vmovdqa %ymm1, (%rdi) -; AVX-NEXT: vzeroupper -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_v6i64: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: movq %rdi, %rax +; AVXIFMA-NEXT: vmovq %r8, %xmm0 +; AVXIFMA-NEXT: vmovq %rcx, %xmm1 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVXIFMA-NEXT: vmovq %rdx, %xmm1 +; AVXIFMA-NEXT: vmovq %rsi, %xmm2 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVXIFMA-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVXIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm1 +; AVXIFMA-NEXT: vpbroadcastq 
{{.*#+}} ymm2 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm2, %ymm0, %ymm0 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm1 +; AVXIFMA-NEXT: vmovq %r9, %xmm0 +; AVXIFMA-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVXIFMA-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVXIFMA-NEXT: vpmuldq %xmm0, %xmm0, %xmm0 +; AVXIFMA-NEXT: vpaddq {{[0-9]+}}(%rsp), %xmm0, %xmm0 +; AVXIFMA-NEXT: vmovdqa %xmm0, 32(%rdi) +; AVXIFMA-NEXT: vmovdqa %ymm1, (%rdi) +; AVXIFMA-NEXT: vzeroupper +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_v6i64: ; AVX512: # %bb.0: @@ -493,6 +603,13 @@ define <6 x i64> @test_v6i64(<6 x i64> %x, <6 x i64> %y, <6 x i64> %z) { ; AVX512-NEXT: vpmuludq %zmm0, %zmm0, %zmm0 ; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_v6i64: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpmuludq %zmm0, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vpaddq %zmm2, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: retq %x_masked = and <6 x i64> %x, splat (i64 67108863) %y_masked = and <6 x i64> %x, splat (i64 67108863) %mul = mul <6 x i64> %x_masked, %y_masked @@ -501,43 +618,43 @@ define <6 x i64> @test_v6i64(<6 x i64> %x, <6 x i64> %y, <6 x i64> %z) { } define <9 x i64> @test_v9i64(<9 x i64> %x, <9 x i64> %y, <9 x i64> %z) { -; AVX-LABEL: test_v9i64: -; AVX: # %bb.0: -; AVX-NEXT: movq %rdi, %rax -; AVX-NEXT: vmovq %r8, %xmm0 -; AVX-NEXT: vmovq %rcx, %xmm1 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX-NEXT: vmovq %rdx, %xmm1 -; AVX-NEXT: vmovq %rsi, %xmm2 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; AVX-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 -; AVX-NEXT: vmovq %r9, %xmm1 -; AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; AVX-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 -; AVX-NEXT: vmovq {{.*#+}} xmm2 = 
mem[0],zero -; AVX-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm3 -; AVX-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm4 -; AVX-NEXT: vpbroadcastq {{.*#+}} ymm5 = [67108863,67108863,67108863,67108863] -; AVX-NEXT: vpand %ymm5, %ymm0, %ymm0 -; AVX-NEXT: vpand %ymm5, %ymm1, %ymm1 -; AVX-NEXT: movl $67108863, %ecx # imm = 0x3FFFFFF -; AVX-NEXT: vmovq %rcx, %xmm5 -; AVX-NEXT: vmovq {{.*#+}} xmm6 = mem[0],zero -; AVX-NEXT: vpand %xmm5, %xmm6, %xmm5 -; AVX-NEXT: vpsrlq $32, %xmm5, %xmm6 -; AVX-NEXT: vpmuludq %xmm6, %xmm5, %xmm6 -; AVX-NEXT: vpsllq $33, %xmm6, %xmm6 -; AVX-NEXT: vpmuludq %xmm5, %xmm5, %xmm5 -; AVX-NEXT: vpaddq %xmm2, %xmm5, %xmm2 -; AVX-NEXT: vpaddq %xmm6, %xmm2, %xmm2 -; AVX-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm4 -; AVX-NEXT: {vex} vpmadd52luq %ymm1, %ymm1, %ymm3 -; AVX-NEXT: vmovdqa %ymm3, 32(%rdi) -; AVX-NEXT: vmovdqa %ymm4, (%rdi) -; AVX-NEXT: vmovq %xmm2, 64(%rdi) -; AVX-NEXT: vzeroupper -; AVX-NEXT: retq +; AVXIFMA-LABEL: test_v9i64: +; AVXIFMA: # %bb.0: +; AVXIFMA-NEXT: movq %rdi, %rax +; AVXIFMA-NEXT: vmovq %r8, %xmm0 +; AVXIFMA-NEXT: vmovq %rcx, %xmm1 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVXIFMA-NEXT: vmovq %rdx, %xmm1 +; AVXIFMA-NEXT: vmovq %rsi, %xmm2 +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVXIFMA-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVXIFMA-NEXT: vmovq %r9, %xmm1 +; AVXIFMA-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVXIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVXIFMA-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 +; AVXIFMA-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVXIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm3 +; AVXIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm4 +; AVXIFMA-NEXT: vpbroadcastq {{.*#+}} ymm5 = [67108863,67108863,67108863,67108863] +; AVXIFMA-NEXT: vpand %ymm5, %ymm0, %ymm0 +; AVXIFMA-NEXT: vpand %ymm5, %ymm1, %ymm1 +; AVXIFMA-NEXT: movl $67108863, %ecx # imm = 0x3FFFFFF +; AVXIFMA-NEXT: vmovq %rcx, %xmm5 +; AVXIFMA-NEXT: vmovq {{.*#+}} xmm6 = mem[0],zero +; 
AVXIFMA-NEXT: vpand %xmm5, %xmm6, %xmm5 +; AVXIFMA-NEXT: vpsrlq $32, %xmm5, %xmm6 +; AVXIFMA-NEXT: vpmuludq %xmm6, %xmm5, %xmm6 +; AVXIFMA-NEXT: vpsllq $33, %xmm6, %xmm6 +; AVXIFMA-NEXT: vpmuludq %xmm5, %xmm5, %xmm5 +; AVXIFMA-NEXT: vpaddq %xmm2, %xmm5, %xmm2 +; AVXIFMA-NEXT: vpaddq %xmm6, %xmm2, %xmm2 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm4 +; AVXIFMA-NEXT: {vex} vpmadd52luq %ymm1, %ymm1, %ymm3 +; AVXIFMA-NEXT: vmovdqa %ymm3, 32(%rdi) +; AVXIFMA-NEXT: vmovdqa %ymm4, (%rdi) +; AVXIFMA-NEXT: vmovq %xmm2, 64(%rdi) +; AVXIFMA-NEXT: vzeroupper +; AVXIFMA-NEXT: retq ; ; AVX512-LABEL: test_v9i64: ; AVX512: # %bb.0: @@ -572,6 +689,44 @@ define <9 x i64> @test_v9i64(<9 x i64> %x, <9 x i64> %y, <9 x i64> %z) { ; AVX512-NEXT: vmovdqa64 %zmm2, (%rdi) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq +; +; AVX512-NOIFMA-LABEL: test_v9i64: +; AVX512-NOIFMA: # %bb.0: +; AVX512-NOIFMA-NEXT: movq %rdi, %rax +; AVX512-NOIFMA-NEXT: vmovq %r8, %xmm0 +; AVX512-NOIFMA-NEXT: vmovq %rcx, %xmm1 +; AVX512-NOIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512-NOIFMA-NEXT: vmovq %rdx, %xmm1 +; AVX512-NOIFMA-NEXT: vmovq %rsi, %xmm2 +; AVX512-NOIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX512-NOIFMA-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX512-NOIFMA-NEXT: vmovq %r9, %xmm1 +; AVX512-NOIFMA-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; AVX512-NOIFMA-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX512-NOIFMA-NEXT: vinserti128 $1, {{[0-9]+}}(%rsp), %ymm1, %ymm1 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX512-NOIFMA-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512-NOIFMA-NEXT: movl $67108863, %ecx # imm = 0x3FFFFFF +; AVX512-NOIFMA-NEXT: vmovq %rcx, %xmm2 +; AVX512-NOIFMA-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero +; AVX512-NOIFMA-NEXT: vpand %xmm2, %xmm3, %xmm2 +; AVX512-NOIFMA-NEXT: vpsrlq $32, %xmm2, %xmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %xmm3, 
%xmm2, %xmm3 +; AVX512-NOIFMA-NEXT: vpsllq $33, %xmm3, %xmm3 +; AVX512-NOIFMA-NEXT: vpmuludq %xmm2, %xmm2, %xmm2 +; AVX512-NOIFMA-NEXT: vpaddq %xmm1, %xmm2, %xmm1 +; AVX512-NOIFMA-NEXT: vpaddq %xmm3, %xmm1, %xmm1 +; AVX512-NOIFMA-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; AVX512-NOIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm3 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm2, %ymm2, %ymm3 +; AVX512-NOIFMA-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm2 +; AVX512-NOIFMA-NEXT: {vex} vpmadd52luq %ymm0, %ymm0, %ymm2 +; AVX512-NOIFMA-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm0 +; AVX512-NOIFMA-NEXT: vmovq %xmm1, 64(%rdi) +; AVX512-NOIFMA-NEXT: vmovdqa64 %zmm0, (%rdi) +; AVX512-NOIFMA-NEXT: vzeroupper +; AVX512-NOIFMA-NEXT: retq %x_masked = and <9 x i64> %x, splat (i64 67108863) %y_masked = and <9 x i64> %x, splat (i64 67108863) %mul = mul <9 x i64> %x_masked, %y_masked diff --git a/llvm/test/CodeGen/X86/known-signbits-shl.ll b/llvm/test/CodeGen/X86/known-signbits-shl.ll index 473fecc307ed4..57d557dec11b9 100644 --- a/llvm/test/CodeGen/X86/known-signbits-shl.ll +++ b/llvm/test/CodeGen/X86/known-signbits-shl.ll @@ -137,7 +137,7 @@ define void @computeNumSignBits_shl_zext_vec_3(<2 x i8> %x, ptr %p) nounwind { ; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; X64-NEXT: por %xmm2, %xmm1 ; X64-NEXT: movdqa %xmm0, %xmm2 -; X64-NEXT: paddw %xmm0, %xmm2 +; X64-NEXT: paddw %xmm2, %xmm2 ; X64-NEXT: movdqa %xmm2, %xmm3 ; X64-NEXT: psraw $1, %xmm3 ; X64-NEXT: pcmpeqw %xmm0, %xmm3 diff --git a/llvm/test/CodeGen/X86/llvm.frexp.ll b/llvm/test/CodeGen/X86/llvm.frexp.ll index 83840dd85c533..e3a1b1b83b2e3 100644 --- a/llvm/test/CodeGen/X86/llvm.frexp.ll +++ b/llvm/test/CodeGen/X86/llvm.frexp.ll @@ -582,6 +582,22 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind { ret i32 %result.0 } +define { float, i32 } @pr160981() { +; X64-LABEL: pr160981: +; X64: # %bb.0: +; X64-NEXT: movss {{.*#+}} xmm0 = [9.9999988E-1,0.0E+0,0.0E+0,0.0E+0] +; X64-NEXT: movl $-126, %eax +; X64-NEXT: retq +; +; 
WIN32-LABEL: pr160981: +; WIN32: # %bb.0: +; WIN32-NEXT: flds __real@3f7ffffe +; WIN32-NEXT: movl $-126, %eax +; WIN32-NEXT: retl + %ret = call { float, i32 } @llvm.frexp.f32.i32(float bitcast (i32 8388607 to float)) + ret { float, i32 } %ret +} + ; FIXME: Widen vector result ; define { <2 x double>, <2 x i32> } @test_frexp_v2f64_v2i32(<2 x double> %a) nounwind { ; %result = call { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double> %a) diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll index 4e6f666fa05de..4cde581c10508 100644 --- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll +++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll @@ -4806,9 +4806,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-KNL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: retq @@ -4830,9 +4829,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: retq @@ -4842,10 +4840,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; 
X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax ; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: retq @@ -4875,9 +4872,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-KNL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: retq @@ -4899,9 +4895,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: retq @@ -4911,10 +4906,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq 
${{\.?LCPI[0-9]+_[0-9]+}}, %rax ; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1} ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: retq @@ -4944,9 +4938,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1 -; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0 -; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0 +; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2 ; X64-KNL-NEXT: kmovw %k1, %k2 ; X64-KNL-NEXT: vmovaps %zmm1, %zmm0 ; X64-KNL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} @@ -4972,9 +4965,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0 -; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0 -; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0 +; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2 ; X64-SKX-SMALL-NEXT: kmovw %k1, %k2 ; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} @@ -4986,10 +4978,9 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair( ; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1 -; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0 +; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0 ; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax -; 
X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0 -; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm2 +; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm2 ; X64-SKX-LARGE-NEXT: kmovw %k1, %k2 ; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0 ; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2} diff --git a/llvm/test/CodeGen/X86/negative-sin.ll b/llvm/test/CodeGen/X86/negative-sin.ll index f24507d3a4f38..4836da2ad7797 100644 --- a/llvm/test/CodeGen/X86/negative-sin.ll +++ b/llvm/test/CodeGen/X86/negative-sin.ll @@ -82,18 +82,13 @@ define double @semi_strict2(double %e) nounwind { ret double %h } -; FIXME: -; Auto-upgrade function attribute to IR-level fast-math-flags. - -define double @fn_attr(double %e) nounwind #0 { -; CHECK-LABEL: fn_attr: +define double @nsz_flag(double %e) nounwind { +; CHECK-LABEL: nsz_flag: ; CHECK: # %bb.0: ; CHECK-NEXT: jmp sin@PLT # TAILCALL - %f = fsub double 0.0, %e - %g = call double @sin(double %f) readonly - %h = fsub double 0.0, %g + %f = fsub nsz double 0.0, %e + %g = call nsz double @sin(double %f) readonly + %h = fsub nsz double 0.0, %g ret double %h } -attributes #0 = { "unsafe-fp-math"="true" "no-signed-zeros-fp-math"="true" } - diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll index f53983036a016..5df1867f73c8e 100644 --- a/llvm/test/CodeGen/X86/oddsubvector.ll +++ b/llvm/test/CodeGen/X86/oddsubvector.ll @@ -155,10 +155,10 @@ define <16 x i32> @PR42819(ptr %a0) { define void @PR42833() { ; SSE2-LABEL: PR42833: ; SSE2: # %bb.0: +; SSE2-NEXT: movl b(%rip), %eax ; SSE2-NEXT: movdqa c+144(%rip), %xmm2 ; SSE2-NEXT: movdqa c+128(%rip), %xmm0 -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: addl b(%rip), %eax +; SSE2-NEXT: addl c+128(%rip), %eax ; SSE2-NEXT: movd %eax, %xmm1 ; SSE2-NEXT: movd %eax, %xmm3 ; SSE2-NEXT: paddd %xmm0, %xmm3 @@ -166,7 +166,7 @@ define void @PR42833() { ; SSE2-NEXT: psubd %xmm2, %xmm4 ; SSE2-NEXT: paddd %xmm2, %xmm2 ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: 
paddd %xmm0, %xmm5 +; SSE2-NEXT: paddd %xmm5, %xmm5 ; SSE2-NEXT: movss {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3] ; SSE2-NEXT: movdqa %xmm2, c+144(%rip) ; SSE2-NEXT: movaps %xmm5, c+128(%rip) @@ -191,17 +191,17 @@ define void @PR42833() { ; ; SSE42-LABEL: PR42833: ; SSE42: # %bb.0: +; SSE42-NEXT: movl b(%rip), %eax ; SSE42-NEXT: movdqa c+144(%rip), %xmm1 ; SSE42-NEXT: movdqa c+128(%rip), %xmm0 -; SSE42-NEXT: movd %xmm0, %eax -; SSE42-NEXT: addl b(%rip), %eax +; SSE42-NEXT: addl c+128(%rip), %eax ; SSE42-NEXT: movd %eax, %xmm2 ; SSE42-NEXT: paddd %xmm0, %xmm2 ; SSE42-NEXT: movdqa d+144(%rip), %xmm3 ; SSE42-NEXT: psubd %xmm1, %xmm3 ; SSE42-NEXT: paddd %xmm1, %xmm1 ; SSE42-NEXT: movdqa %xmm0, %xmm4 -; SSE42-NEXT: paddd %xmm0, %xmm4 +; SSE42-NEXT: paddd %xmm4, %xmm4 ; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7] ; SSE42-NEXT: movdqa %xmm1, c+144(%rip) ; SSE42-NEXT: movdqa %xmm4, c+128(%rip) diff --git a/llvm/test/CodeGen/X86/pr62286.ll b/llvm/test/CodeGen/X86/pr62286.ll index ce03f8fad4a19..161e9651a9cf2 100644 --- a/llvm/test/CodeGen/X86/pr62286.ll +++ b/llvm/test/CodeGen/X86/pr62286.ll @@ -26,27 +26,33 @@ define i64 @PR62286(i32 %a) { ; AVX1-LABEL: PR62286: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovd %edi, %xmm0 -; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0] +; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7] ; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7] -; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1 +; 
AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0 -; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] -; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1 -; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] ; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: PR62286: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovd %edi, %xmm0 -; AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm1 -; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3] -; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0] +; AVX2-NEXT: vpaddd %ymm0, %ymm0, %ymm1 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 @@ -59,12 +65,12 @@ define i64 @PR62286(i32 %a) { ; AVX512-LABEL: PR62286: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovd %edi, %xmm0 -; AVX512-NEXT: movb $8, %al +; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0] +; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm1 +; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0 +; AVX512-NEXT: movw $4369, %ax # imm = 0x1111 ; AVX512-NEXT: kmovd %eax, %k1 -; AVX512-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z} -; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 -; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0 -; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] +; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} ; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0 ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddq 
%zmm1, %zmm0, %zmm0 diff --git a/llvm/test/CodeGen/X86/pr74736.ll b/llvm/test/CodeGen/X86/pr74736.ll index ceccee00c9457..58955265580bd 100644 --- a/llvm/test/CodeGen/X86/pr74736.ll +++ b/llvm/test/CodeGen/X86/pr74736.ll @@ -6,8 +6,8 @@ define void @main(<16 x i32> %0, i32 %1) { ; SSE-LABEL: main: ; SSE: # %bb.0: # %entry ; SSE-NEXT: movd %edi, %xmm4 -; SSE-NEXT: movss {{.*#+}} xmm0 = [1,0,0,0] -; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm4[1,0] +; SSE-NEXT: movsd {{.*#+}} xmm0 = [0,1,0,0] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,0] ; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: paddd %xmm1, %xmm1 ; SSE-NEXT: paddd %xmm3, %xmm3 @@ -32,20 +32,20 @@ define void @main(<16 x i32> %0, i32 %1) { ; AVX-LABEL: main: ; AVX: # %bb.0: # %entry ; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm0[1,2,3] ; AVX-NEXT: movl $1, %eax ; AVX-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 ; AVX-NEXT: vpinsrd $3, %edi, %xmm2, %xmm2 -; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] -; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 -; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm1 -; AVX-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,1,3,3,5,5,7] -; AVX-NEXT: vpermd %ymm0, %ymm2, %ymm2 +; AVX-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVX-NEXT: vpaddd %ymm2, %ymm2, %ymm2 +; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm3 ; AVX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] ; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,3,3,3,7,7,7,7] -; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7] +; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0 +; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,5,7] ; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7] -; AVX-NEXT: vpxor %ymm0, %ymm2, %ymm0 +; AVX-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,1,1,3,3,5,5,7] +; AVX-NEXT: vpermd %ymm2, %ymm1, %ymm1 +; AVX-NEXT: vpxor %ymm0, %ymm1, %ymm0 ; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = 
xmm0[2,3,2,3] diff --git a/llvm/test/CodeGen/X86/rematerialize-sub-super-reg.mir b/llvm/test/CodeGen/X86/rematerialize-sub-super-reg.mir index b99c5fc8df0cb..44a2aecdc3672 100644 --- a/llvm/test/CodeGen/X86/rematerialize-sub-super-reg.mir +++ b/llvm/test/CodeGen/X86/rematerialize-sub-super-reg.mir @@ -165,5 +165,25 @@ body: | bb.3: $rax = COPY %t3 RET 0, $rax - ... +--- +name: rematerialize_superregister_into_subregister_def_with_impdef_physreg +body: | + bb.0.entry: + ; CHECK-LABEL: name: rematerialize_superregister_into_subregister_def_with_impdef_physreg + ; CHECK: dead $esi = MOV32r0 implicit-def dead $eflags, implicit-def $rsi + ; CHECK-NEXT: dead $edx = MOV32r0 implicit-def dead $eflags, implicit-def $rdx + ; CHECK-NEXT: FAKE_USE implicit killed $rsi, implicit killed $rdx + ; CHECK-NEXT: dead $eax = MOV32r0 implicit-def dead $eflags, implicit-def dead $rax, implicit-def $al + ; CHECK-NEXT: FAKE_USE implicit killed $al + ; CHECK-NEXT: $eax = MOV32r0 implicit-def dead $eflags + ; CHECK-NEXT: RET 0, $eax + undef %1.sub_32bit:gr64_with_sub_8bit = MOV32r0 implicit-def dead $eflags, implicit-def %1 + $rsi = COPY %1 + $rdx = COPY %1 + FAKE_USE implicit killed $rsi, implicit killed $rdx + %4:gr8 = COPY killed %1.sub_8bit + $al = COPY killed %4 + FAKE_USE implicit killed $al + $eax = MOV32r0 implicit-def dead $eflags + RET 0, killed $eax diff --git a/llvm/test/CodeGen/X86/shift-i512.ll b/llvm/test/CodeGen/X86/shift-i512.ll index 756019d0e98a0..03b61d9235254 100644 --- a/llvm/test/CodeGen/X86/shift-i512.ll +++ b/llvm/test/CodeGen/X86/shift-i512.ll @@ -10,7 +10,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: valignq {{.*#+}} zmm1 = zmm0[3,4,5,6,7,0,1,2] ; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512VL-NEXT: vpsllq $1, %xmm0, %xmm3 +; AVX512VL-NEXT: vpaddq %xmm0, %xmm0, %xmm3 ; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3] ; AVX512VL-NEXT: vpsrlq $63, %xmm4, %xmm4 ; AVX512VL-NEXT: vpaddq %xmm2, %xmm2, %xmm2 @@ 
-34,7 +34,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2 ; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3] ; AVX512VBMI-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3 -; AVX512VBMI-NEXT: vpsllq $1, %xmm0, %xmm4 +; AVX512VBMI-NEXT: vpaddq %xmm0, %xmm0, %xmm4 ; AVX512VBMI-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3 ; AVX512VBMI-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; AVX512VBMI-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7] @@ -51,7 +51,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) { ; ZNVER4-NEXT: vextracti32x4 $2, %zmm0, %xmm1 ; ZNVER4-NEXT: vextracti128 $1, %ymm0, %xmm2 ; ZNVER4-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3] -; ZNVER4-NEXT: vpsllq $1, %xmm0, %xmm4 +; ZNVER4-NEXT: vpaddq %xmm0, %xmm0, %xmm4 ; ZNVER4-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1 ; ZNVER4-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3 ; ZNVER4-NEXT: vextracti64x4 $1, %zmm0, %ymm2 diff --git a/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll b/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll index 82c460fc55938..571915b47d297 100644 --- a/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll +++ b/llvm/test/CodeGen/X86/shuffle-strided-with-offset-512.ll @@ -1,9 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW -; RUN: llc < %s -mtriple=x86_64-unknown-unknown 
-mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL,AVX512VL-FAST-ALL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL,AVX512VL-FAST-PERLANE +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW,AVX512BW-FAST-ALL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW,AVX512BW-FAST-PERLANE ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512BWVL-FAST-ALL ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512BWVL-FAST-PERLANE @@ -21,6 +21,31 @@ define void @shuffle_v64i8_to_v32i8_1(ptr %L, ptr %S) nounwind { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512VL-FAST-ALL-LABEL: shuffle_v64i8_to_v32i8_1: +; AVX512VL-FAST-ALL: # %bb.0: +; AVX512VL-FAST-ALL-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512VL-FAST-ALL-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX512VL-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31] +; AVX512VL-FAST-ALL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u] +; AVX512VL-FAST-ALL-NEXT: vpmovsxbq {{.*#+}} ymm2 = [0,2,5,7] +; AVX512VL-FAST-ALL-NEXT: vpermi2q %ymm1, %ymm0, %ymm2 +; AVX512VL-FAST-ALL-NEXT: vmovdqa %ymm2, (%rsi) +; AVX512VL-FAST-ALL-NEXT: vzeroupper 
+; AVX512VL-FAST-ALL-NEXT: retq +; +; AVX512VL-FAST-PERLANE-LABEL: shuffle_v64i8_to_v32i8_1: +; AVX512VL-FAST-PERLANE: # %bb.0: +; AVX512VL-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512VL-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX512VL-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15,1,3,5,7,9,11,13,15] +; AVX512VL-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm1, %ymm1 +; AVX512VL-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm0, %ymm0 +; AVX512VL-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] +; AVX512VL-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512VL-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rsi) +; AVX512VL-FAST-PERLANE-NEXT: vzeroupper +; AVX512VL-FAST-PERLANE-NEXT: retq +; ; AVX512BW-LABEL: shuffle_v64i8_to_v32i8_1: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpsrlw $8, (%rdi), %zmm0 @@ -63,6 +88,40 @@ define void @shuffle_v16i32_to_v8i32_1(ptr %L, ptr %S) nounwind { ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; +; AVX512VL-FAST-ALL-LABEL: shuffle_v16i32_to_v8i32_1: +; AVX512VL-FAST-ALL: # %bb.0: +; AVX512VL-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15] +; AVX512VL-FAST-ALL-NEXT: vpermps (%rdi), %zmm0, %zmm0 +; AVX512VL-FAST-ALL-NEXT: vmovaps %ymm0, (%rsi) +; AVX512VL-FAST-ALL-NEXT: vzeroupper +; AVX512VL-FAST-ALL-NEXT: retq +; +; AVX512VL-FAST-PERLANE-LABEL: shuffle_v16i32_to_v8i32_1: +; AVX512VL-FAST-PERLANE: # %bb.0: +; AVX512VL-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0 +; AVX512VL-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],mem[1,3],ymm0[5,7],mem[5,7] +; AVX512VL-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512VL-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rsi) +; AVX512VL-FAST-PERLANE-NEXT: vzeroupper +; AVX512VL-FAST-PERLANE-NEXT: retq +; +; AVX512BW-FAST-ALL-LABEL: shuffle_v16i32_to_v8i32_1: +; AVX512BW-FAST-ALL: # %bb.0: +; AVX512BW-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15] +; AVX512BW-FAST-ALL-NEXT: 
vpermps (%rdi), %zmm0, %zmm0 +; AVX512BW-FAST-ALL-NEXT: vmovaps %ymm0, (%rsi) +; AVX512BW-FAST-ALL-NEXT: vzeroupper +; AVX512BW-FAST-ALL-NEXT: retq +; +; AVX512BW-FAST-PERLANE-LABEL: shuffle_v16i32_to_v8i32_1: +; AVX512BW-FAST-PERLANE: # %bb.0: +; AVX512BW-FAST-PERLANE-NEXT: vmovaps (%rdi), %ymm0 +; AVX512BW-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],mem[1,3],ymm0[5,7],mem[5,7] +; AVX512BW-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512BW-FAST-PERLANE-NEXT: vmovaps %ymm0, (%rsi) +; AVX512BW-FAST-PERLANE-NEXT: vzeroupper +; AVX512BW-FAST-PERLANE-NEXT: retq +; ; AVX512BWVL-FAST-ALL-LABEL: shuffle_v16i32_to_v8i32_1: ; AVX512BWVL-FAST-ALL: # %bb.0: ; AVX512BWVL-FAST-ALL-NEXT: vpmovsxbd {{.*#+}} ymm0 = [1,3,5,7,9,11,13,15] diff --git a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll index 3f48b22e2b9ff..a48be037ebebc 100644 --- a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll @@ -5791,20 +5791,20 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi16: ; SSE: # %bb.0: -; SSE-NEXT: psllw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x01] +; SSE-NEXT: psllw $2, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi16: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x01] +; AVX1-NEXT: vpsllw $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsllw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x01] +; AVX512-NEXT: vpsllw $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] %arg0 = bitcast <2 x 
i64> %a0 to <8 x i16> - %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 1) + %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 2) %bc = bitcast <8 x i16> %res to <2 x i64> ret <2 x i64> %bc } @@ -5813,20 +5813,20 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi32: ; SSE: # %bb.0: -; SSE-NEXT: pslld $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x01] +; SSE-NEXT: pslld $2, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi32: ; AVX1: # %bb.0: -; AVX1-NEXT: vpslld $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x01] +; AVX1-NEXT: vpslld $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpslld $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x01] +; AVX512-NEXT: vpslld $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] %arg0 = bitcast <2 x i64> %a0 to <4 x i32> - %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 1) + %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 2) %bc = bitcast <4 x i32> %res to <2 x i64> ret <2 x i64> %bc } @@ -5835,19 +5835,19 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) { ; SSE-LABEL: test_mm_slli_epi64: ; SSE: # %bb.0: -; SSE-NEXT: psllq $1, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x01] +; SSE-NEXT: psllq $2, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x02] ; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX1-LABEL: test_mm_slli_epi64: ; AVX1: # %bb.0: -; AVX1-NEXT: vpsllq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x01] +; AVX1-NEXT: vpsllq $2, %xmm0, %xmm0 # encoding: 
[0xc5,0xf9,0x73,0xf0,0x02] ; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3] ; ; AVX512-LABEL: test_mm_slli_epi64: ; AVX512: # %bb.0: -; AVX512-NEXT: vpsllq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x01] +; AVX512-NEXT: vpsllq $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x02] ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3] - %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1) + %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 2) ret <2 x i64> %res } declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone diff --git a/llvm/test/CodeGen/X86/vec_shift6.ll b/llvm/test/CodeGen/X86/vec_shift6.ll index 71e659c681d17..219e32c86c848 100644 --- a/llvm/test/CodeGen/X86/vec_shift6.ll +++ b/llvm/test/CodeGen/X86/vec_shift6.ll @@ -28,14 +28,14 @@ define <8 x i16> @test2(<8 x i16> %a) { ; SSE2-LABEL: test2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddw %xmm0, %xmm1 +; SSE2-NEXT: paddw %xmm1, %xmm1 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; SSE2-NEXT: retq ; ; SSE41-LABEL: test2: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: paddw %xmm0, %xmm1 +; SSE41-NEXT: paddw %xmm1, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: retq ; @@ -56,7 +56,7 @@ define <4 x i32> @test3(<4 x i32> %a) { ; SSE2-LABEL: test3: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm1 ; SSE2-NEXT: pslld $2, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq @@ -81,14 +81,14 @@ define <4 x i32> @test4(<4 x i32> %a) { ; SSE2-LABEL: test4: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: paddd %xmm1, %xmm1 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; SSE2-NEXT: retq ; ; SSE41-LABEL: test4: ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 -; SSE41-NEXT: paddd %xmm0, 
%xmm1 +; SSE41-NEXT: paddd %xmm1, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll index 0b98a9388adc1..445e572aff403 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll @@ -679,6 +679,19 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt) ; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512VLBW-NEXT: retq ; +; AVX512VLVBMI2-LABEL: var_funnnel_v32i8: +; AVX512VLVBMI2: # %bb.0: +; AVX512VLVBMI2-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512VLVBMI2-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,64,1,65,2,66,3,67,4,68,5,69,6,70,7,71,8,72,9,73,10,74,11,75,12,76,13,77,14,78,15,79,16,80,17,81,18,82,19,83,20,84,21,85,22,86,23,87,24,88,25,89,26,90,27,91,28,92,29,93,30,94,31,95] +; AVX512VLVBMI2-NEXT: vpermi2b %zmm0, %zmm1, %zmm3 +; AVX512VLVBMI2-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm2, %ymm0 +; AVX512VLVBMI2-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero +; AVX512VLVBMI2-NEXT: vpsllvw %zmm0, %zmm3, %zmm0 +; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm0, %zmm0 +; AVX512VLVBMI2-NEXT: vpmovwb %zmm0, %ymm0 +; AVX512VLVBMI2-NEXT: retq +; ; XOPAVX1-LABEL: var_funnnel_v32i8: ; XOPAVX1: # %bb.0: ; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 @@ -1918,6 +1931,17 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0 ; 
AVX512VLBW-NEXT: retq ; +; AVX512VLVBMI2-LABEL: constant_funnnel_v32i8: +; AVX512VLVBMI2: # %bb.0: +; AVX512VLVBMI2-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 +; AVX512VLVBMI2-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 +; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,64,1,65,2,66,3,67,4,68,5,69,6,70,7,71,8,72,9,73,10,74,11,75,12,76,13,77,14,78,15,79,16,80,17,81,18,82,19,83,20,84,21,85,22,86,23,87,24,88,25,89,26,90,27,91,28,92,29,93,30,94,31,95] +; AVX512VLVBMI2-NEXT: vpermi2b %zmm0, %zmm1, %zmm2 +; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 +; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm0, %zmm0 +; AVX512VLVBMI2-NEXT: vpmovwb %zmm0, %ymm0 +; AVX512VLVBMI2-NEXT: retq +; ; XOPAVX1-LABEL: constant_funnnel_v32i8: ; XOPAVX1: # %bb.0: ; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll index 5c485592295d3..b4cffcd171b33 100644 --- a/llvm/test/CodeGen/X86/vector-gep.ll +++ b/llvm/test/CodeGen/X86/vector-gep.ll @@ -122,91 +122,87 @@ define <64 x ptr> @AGEP9(ptr %param, <64 x i32> %off) nounwind { ; CHECK-NEXT: movl %esp, %ebp ; CHECK-NEXT: andl $-32, %esp ; CHECK-NEXT: subl $160, %esp -; CHECK-NEXT: vmovdqa %ymm2, %ymm5 -; CHECK-NEXT: vmovdqa %ymm1, %ymm3 -; CHECK-NEXT: vmovdqa %ymm0, %ymm1 -; CHECK-NEXT: vmovdqa 72(%ebp), %ymm0 -; CHECK-NEXT: vmovdqa 40(%ebp), %ymm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm4 -; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm7 -; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4 -; CHECK-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm3 +; 
CHECK-NEXT: vbroadcastss 12(%ebp), %xmm5 +; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3 +; CHECK-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 104(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 136(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vmovdqa 168(%ebp), %ymm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2 -; CHECK-NEXT: vmovdqa %xmm2, (%esp) # 16-byte Spill -; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vmovdqa 40(%ebp), %xmm0 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, 
%xmm7, %xmm2 -; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0 -; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0 -; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm1 -; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm1 -; CHECK-NEXT: vpaddd %xmm1, %xmm7, %xmm1 -; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm6 -; CHECK-NEXT: vpaddd %xmm6, %xmm7, %xmm6 -; CHECK-NEXT: vextractf128 $1, %ymm3, %xmm3 -; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3 -; CHECK-NEXT: vpaddd %xmm3, %xmm7, %xmm3 -; CHECK-NEXT: vmovdqa %ymm5, %ymm4 -; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm5 -; CHECK-NEXT: vpaddd %xmm5, %xmm7, %xmm5 -; CHECK-NEXT: vextractf128 $1, %ymm4, %xmm4 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vmovdqa 56(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill +; CHECK-NEXT: vmovdqa 72(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa %xmm0, (%esp) # 16-byte Spill +; CHECK-NEXT: vmovdqa 88(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm2 +; CHECK-NEXT: vmovdqa 104(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm1 +; CHECK-NEXT: vmovdqa 120(%ebp), %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0 +; CHECK-NEXT: vmovdqa 136(%ebp), %xmm6 +; CHECK-NEXT: vpaddd %xmm6, %xmm6, %xmm6 +; CHECK-NEXT: vpaddd %xmm6, %xmm5, %xmm6 +; CHECK-NEXT: vmovdqa 152(%ebp), %xmm7 +; CHECK-NEXT: vpaddd %xmm7, %xmm7, %xmm7 +; CHECK-NEXT: vpaddd %xmm7, %xmm5, %xmm7 +; CHECK-NEXT: vmovdqa 168(%ebp), %xmm4 ; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4 +; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm4 +; CHECK-NEXT: vmovdqa 184(%ebp), %xmm3 +; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3 +; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3 ; 
CHECK-NEXT: movl 8(%ebp), %eax -; CHECK-NEXT: vmovdqa %xmm4, 80(%eax) -; CHECK-NEXT: vmovdqa %xmm5, 64(%eax) -; CHECK-NEXT: vmovdqa %xmm3, 48(%eax) -; CHECK-NEXT: vmovdqa %xmm6, 32(%eax) -; CHECK-NEXT: vmovdqa %xmm1, 16(%eax) -; CHECK-NEXT: vmovdqa %xmm0, (%eax) -; CHECK-NEXT: vmovdqa %xmm2, 240(%eax) +; CHECK-NEXT: vmovdqa %xmm3, 240(%eax) +; CHECK-NEXT: vmovdqa %xmm4, 224(%eax) +; CHECK-NEXT: vmovdqa %xmm7, 208(%eax) +; CHECK-NEXT: vmovdqa %xmm6, 192(%eax) +; CHECK-NEXT: vmovdqa %xmm0, 176(%eax) +; CHECK-NEXT: vmovdqa %xmm1, 160(%eax) +; CHECK-NEXT: vmovdqa %xmm2, 144(%eax) ; CHECK-NEXT: vmovaps (%esp), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 224(%eax) +; CHECK-NEXT: vmovaps %xmm0, 128(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 208(%eax) +; CHECK-NEXT: vmovaps %xmm0, 112(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 192(%eax) +; CHECK-NEXT: vmovaps %xmm0, 96(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 176(%eax) +; CHECK-NEXT: vmovaps %xmm0, 80(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 160(%eax) +; CHECK-NEXT: vmovaps %xmm0, 64(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 144(%eax) +; CHECK-NEXT: vmovaps %xmm0, 48(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 128(%eax) +; CHECK-NEXT: vmovaps %xmm0, 32(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 112(%eax) +; CHECK-NEXT: vmovaps %xmm0, 16(%eax) ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload -; CHECK-NEXT: vmovaps %xmm0, 96(%eax) +; CHECK-NEXT: vmovaps %xmm0, (%eax) ; CHECK-NEXT: movl %ebp, %esp ; CHECK-NEXT: popl %ebp ; CHECK-NEXT: vzeroupper diff 
--git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll index 13f7d68ccb893..33d80f63dbcc8 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -652,7 +652,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE2-NEXT: paddb %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psllw $1, %xmm2 +; SSE2-NEXT: paddw %xmm2, %xmm2 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -678,7 +678,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ; SSE41-NEXT: paddb %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psllw $1, %xmm2 +; SSE41-NEXT: paddw %xmm2, %xmm2 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE41-NEXT: psrlw $2, %xmm1 ; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 @@ -701,7 +701,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 @@ -720,7 +720,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX2NOBW-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX2NOBW-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX2NOBW-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX2NOBW-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX2NOBW-NEXT: vpand 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 @@ -739,7 +739,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %xmm2, %xmm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX512BW-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX512BW-NEXT: vpsllw $1, %xmm1, %xmm2 +; AVX512BW-NEXT: vpaddw %xmm1, %xmm1, %xmm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 ; AVX512BW-NEXT: vpsrlw $2, %xmm1, %xmm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll index 1a5c3730c1839..e43108fe7d784 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll @@ -590,7 +590,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5 ; AVX1-NEXT: vpaddb %xmm3, %xmm5, %xmm3 -; AVX1-NEXT: vpsllw $1, %xmm3, %xmm5 +; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm5 ; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] ; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5 ; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3 @@ -609,7 +609,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3 ; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 ; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpsllw $1, %xmm2, %xmm3 +; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3 ; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2 @@ -633,7 +633,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX2NOBW-NEXT: vpsrlw $1, %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 -; AVX2NOBW-NEXT: vpsllw $1, %ymm1, %ymm2 +; AVX2NOBW-NEXT: 
vpaddw %ymm1, %ymm1, %ymm2 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 @@ -651,7 +651,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %ymm2, %ymm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1 -; AVX512BW-NEXT: vpsllw $1, %ymm1, %ymm2 +; AVX512BW-NEXT: vpaddw %ymm1, %ymm1, %ymm2 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 ; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm1 ; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll index 9c56894f0c59c..bf98bcca59c04 100644 --- a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll +++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll @@ -485,7 +485,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] ; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5 ; AVX512F-NEXT: vpaddb %ymm3, %ymm5, %ymm3 -; AVX512F-NEXT: vpsllw $1, %ymm3, %ymm5 +; AVX512F-NEXT: vpaddw %ymm3, %ymm3, %ymm5 ; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248] ; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5 ; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3 @@ -504,7 +504,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3 ; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3 ; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2 -; AVX512F-NEXT: vpsllw $1, %ymm2, %ymm3 +; AVX512F-NEXT: vpaddw %ymm2, %ymm2, %ymm3 ; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3 ; AVX512F-NEXT: vpsrlw $2, %ymm2, 
%ymm2 ; AVX512F-NEXT: vpand %ymm2, %ymm8, %ymm2 @@ -528,7 +528,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind { ; AVX512BW-NEXT: vpsrlw $1, %zmm2, %zmm2 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2 ; AVX512BW-NEXT: vpaddb %zmm1, %zmm2, %zmm1 -; AVX512BW-NEXT: vpsllw $1, %zmm1, %zmm2 +; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm2 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2 ; AVX512BW-NEXT: vpsrlw $2, %zmm1, %zmm1 ; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1 diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll index 13b21a747878b..6e1bf25908302 100644 --- a/llvm/test/CodeGen/X86/vector-mul.ll +++ b/llvm/test/CodeGen/X86/vector-mul.ll @@ -821,10 +821,10 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, <16 x i16> %a1) nounwind { ; X86-SSE-NEXT: andl $-16, %esp ; X86-SSE-NEXT: subl $16, %esp ; X86-SSE-NEXT: movdqa %xmm1, %xmm3 -; X86-SSE-NEXT: paddw %xmm1, %xmm3 +; X86-SSE-NEXT: paddw %xmm3, %xmm3 ; X86-SSE-NEXT: paddw %xmm3, %xmm1 ; X86-SSE-NEXT: movdqa %xmm0, %xmm3 -; X86-SSE-NEXT: paddw %xmm0, %xmm3 +; X86-SSE-NEXT: paddw %xmm3, %xmm3 ; X86-SSE-NEXT: paddw %xmm2, %xmm0 ; X86-SSE-NEXT: paddw %xmm3, %xmm0 ; X86-SSE-NEXT: paddw 8(%ebp), %xmm1 @@ -835,9 +835,9 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, <16 x i16> %a1) nounwind { ; X64-SSE-LABEL: madd_v16i16_3: ; X64-SSE: # %bb.0: ; X64-SSE-NEXT: movdqa %xmm1, %xmm4 -; X64-SSE-NEXT: paddw %xmm1, %xmm4 +; X64-SSE-NEXT: paddw %xmm4, %xmm4 ; X64-SSE-NEXT: movdqa %xmm0, %xmm5 -; X64-SSE-NEXT: paddw %xmm0, %xmm5 +; X64-SSE-NEXT: paddw %xmm5, %xmm5 ; X64-SSE-NEXT: paddw %xmm2, %xmm0 ; X64-SSE-NEXT: paddw %xmm5, %xmm0 ; X64-SSE-NEXT: paddw %xmm3, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll index 227e000c6be7f..ab1feba98b008 100644 --- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll +++ 
b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll @@ -907,7 +907,7 @@ define i1 @mask_v8i32_2(<8 x i32> %a0) { ; SSE2-LABEL: mask_v8i32_2: ; SSE2: # %bb.0: ; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: pslld $1, %xmm0 +; SSE2-NEXT: paddd %xmm0, %xmm0 ; SSE2-NEXT: movmskps %xmm0, %eax ; SSE2-NEXT: testl %eax, %eax ; SSE2-NEXT: sete %al diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll index 2b1cf5b671e53..99dac74d8127b 100644 --- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll @@ -927,7 +927,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind { ; SSE2-LABEL: constant_shift_v2i64: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: paddq %xmm0, %xmm1 +; SSE2-NEXT: paddq %xmm1, %xmm1 ; SSE2-NEXT: psllq $7, %xmm0 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; SSE2-NEXT: retq @@ -975,7 +975,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind { ; X86-SSE-LABEL: constant_shift_v2i64: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: movdqa %xmm0, %xmm1 -; X86-SSE-NEXT: paddq %xmm0, %xmm1 +; X86-SSE-NEXT: paddq %xmm1, %xmm1 ; X86-SSE-NEXT: psllq $7, %xmm0 ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] ; X86-SSE-NEXT: retl diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll index 2df013d0ff3e3..3279a50a1265b 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll @@ -370,6 +370,16 @@ define <8 x float> @constant_fold_vpermilvar_ps_256() { ret <8 x float> %1 } +define <8 x float> @freeze_vpermilvar_ps_256(<8 x float> %a0) { +; CHECK-LABEL: freeze_vpermilvar_ps_256: +; CHECK: # %bb.0: +; CHECK-NEXT: ret{{[l|q]}} + %s0 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> ) + %f0 = freeze <8 x float> %s0 + %s1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x 
float> %f0, <8 x i32> ) + ret <8 x float> %s1 +} + define void @PR39483() { ; X86-AVX1-LABEL: PR39483: ; X86-AVX1: # %bb.0: # %entry diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll index 298858a8fcc73..56c0b164b63d6 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll @@ -933,6 +933,16 @@ entry: ret i32 %tmp6 } +define <8 x float> @freeze_permps(<8 x float> %a0) { +; CHECK-LABEL: freeze_permps: +; CHECK: # %bb.0: +; CHECK-NEXT: ret{{[l|q]}} + %s0 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> ) + %f0 = freeze <8 x float> %s0 + %s1 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %f0, <8 x i32> ) + ret <8 x float> %s1 +} + define <32 x i8> @PR27320(<8 x i32> %a0) { ; CHECK-LABEL: PR27320: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll index d3e4906450e43..bec33492bbf1e 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll @@ -7,6 +7,7 @@ ; Combine tests involving SSE41 target shuffles (BLEND,INSERTPS,MOVZX) declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) +declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) define <16 x i8> @combine_vpshufb_as_movzx(<16 x i8> %a0) { ; SSE-LABEL: combine_vpshufb_as_movzx: @@ -58,6 +59,25 @@ define <4 x i32> @combine_blend_of_permutes_v4i32(<2 x i64> %a0, <2 x i64> %a1) ret <4 x i32> %r } +define <4 x float> @freeze_insertps(<4 x float> %a0, <4 x float> %a1) { +; SSE-LABEL: freeze_insertps: +; SSE: # %bb.0: +; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] +; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm0[1],xmm1[1,2,3] +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: freeze_insertps: +; AVX: # %bb.0: +; AVX-NEXT: 
vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] +; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],xmm1[1,2,3] +; AVX-NEXT: retq + %s0 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 16) + %f0 = freeze <4 x float> %s0 + %s1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a1, <4 x float> %f0, i8 64) + ret <4 x float> %s1 +} + define <16 x i8> @PR50049(ptr %p1, ptr %p2) { ; SSE-LABEL: PR50049: ; SSE: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll index bd2710139d584..0e20b1813040a 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll @@ -896,6 +896,16 @@ define i32 @mask_z1z3_v16i8(<16 x i8> %a0) { ret i32 %4 } +define <16 x i8> @freeze_pshufb_v16i8(<16 x i8> %a0) { +; CHECK-LABEL: freeze_pshufb_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: retq + %s0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> ) + %f0 = freeze <16 x i8> %s0 + %s1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %f0, <16 x i8> ) + ret <16 x i8> %s1 +} + define i32 @PR22415(double %a0) { ; SSE-LABEL: PR22415: ; SSE: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll index 5b61de5a3b772..ee9d8a55aeb3e 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll @@ -3550,14 +3550,14 @@ define <8 x i16> @PR141475(i32 %in) { ; SSE-LABEL: PR141475: ; SSE: # %bb.0: ; SSE-NEXT: movd %edi, %xmm0 -; SSE-NEXT: pslld $1, %xmm0 +; SSE-NEXT: paddd %xmm0, %xmm0 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] ; SSE-NEXT: retq ; ; AVX-LABEL: PR141475: ; AVX: # %bb.0: ; AVX-NEXT: vmovd %edi, %xmm0 -; AVX-NEXT: vpslld $1, %xmm0, %xmm0 +; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] ; AVX-NEXT: retq 
%mul = shl i32 %in, 1 diff --git a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll index 54dc107fd0c10..3b93734c24deb 100644 --- a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll +++ b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll @@ -1438,26 +1438,26 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_10(<8 x i16> %a0) { define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddw %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <8 x i16> %a0, %t1 = shl <8 x i16> %t0, @@ -1656,26 +1656,26 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) { define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; 
X86-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddw %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <8 x i16> %a0, %t1 = shl <8 x i16> %t0, @@ -2373,40 +2373,40 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_18(<4 x i32> %a0) { define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddd %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767] -; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534] +; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddd %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-AVX1: # %bb.0: -; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: retq ; ; X64-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X64-AVX2: # %bb.0: -; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767] -; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534] +; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: retq %t0 = and <4 x i32> %a0, %t1 = shl <4 x i32> %t0, @@ -2675,40 +2675,40 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) { define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddd %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224] -; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152] +; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X86-AVX2-NEXT: retl ; ; X64-SSE2-LABEL: 
test_128_i32_x_4_4294836224_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddd %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-AVX1: # %bb.0: -; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX1-NEXT: retq ; ; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X64-AVX2: # %bb.0: -; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224] -; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152] +; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-AVX2-NEXT: retq %t0 = and <4 x i32> %a0, %t1 = shl <4 x i32> %t0, @@ -3325,26 +3325,26 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_34(<2 x i64> % define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddq %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddq %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: 
test_128_i64_x_2_2147483647_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <2 x i64> %a0, %t1 = shl <2 x i64> %t0, @@ -3543,26 +3543,26 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) { define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_shl_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddq %xmm0, %xmm0 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: paddq %xmm0, %xmm0 +; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; X64-SSE2-NEXT: retq ; ; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1: ; X64-AVX: # %bb.0: -; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 +; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: retq %t0 = and <2 x i64> %a0, %t1 = shl <2 x i64> %t0, diff --git a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll index f3312ce0c5bd2..919f16b103090 100644 --- a/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll +++ b/llvm/test/Instrumentation/AddressSanitizer/RISCV/asan-rvv-intrinsics.ll 
@@ -1256,7 +1256,31 @@ define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vloxei.nxv1i32.p0.nxv1i16.i64( poison, ptr [[TMP0:%.*]], [[TMP1:%.*]], i64 [[TMP2:%.*]]) +; CHECK-NEXT: [[TMP4:%.*]] = zext [[TMP1:%.*]] to +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]] +; CHECK: 7: +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ] +; CHECK-NEXT: [[TMP10:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]] +; CHECK: 11: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP5]], i64 [[IV]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4) +; CHECK-NEXT: br label [[TMP14]] +; CHECK: 14: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vloxei.nxv1i32.p0.nxv1i16.i64( poison, ptr [[TMP0]], [[TMP1]], i64 [[TMP2]]) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1281,7 +1305,31 @@ define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vloxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label 
[[TMP9:%.*]], label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]], i64 1) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1305,7 +1353,31 @@ define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16( @llvm.riscv.vloxei.nxv1f32.p0.nxv1i16.i64( poison, ptr [[TMP0:%.*]], [[TMP1:%.*]], i64 [[TMP2:%.*]]) +; CHECK-NEXT: [[TMP4:%.*]] = zext [[TMP1:%.*]] to +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]] +; CHECK: 7: +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ] +; CHECK-NEXT: [[TMP10:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; 
CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]] +; CHECK: 11: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP5]], i64 [[IV]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4) +; CHECK-NEXT: br label [[TMP14]] +; CHECK: 14: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vloxei.nxv1f32.p0.nxv1i16.i64( poison, ptr [[TMP0]], [[TMP1]], i64 [[TMP2]]) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1328,7 +1400,31 @@ define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vluxei.nxv1i32.p0.nxv1i16.i64( poison, ptr [[TMP0:%.*]], [[TMP1:%.*]], i64 [[TMP2:%.*]]) +; CHECK-NEXT: [[TMP4:%.*]] = zext [[TMP1:%.*]] to +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP0:%.*]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP2:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[TMP7:%.*]], label [[TMP15:%.*]] +; CHECK: 7: +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 [[TMP8]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[TMP14:%.*]] ] +; CHECK-NEXT: [[TMP10:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP14]] +; CHECK: 11: +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP5]], i64 [[IV]] +; CHECK-NEXT: [[TMP13:%.*]] = ptrtoint ptr [[TMP12]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP13]], i64 4) +; CHECK-NEXT: br label [[TMP14]] +; CHECK: 14: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP9]] 
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vluxei.nxv1i32.p0.nxv1i16.i64( poison, ptr [[TMP0]], [[TMP1]], i64 [[TMP2]]) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1353,7 +1449,31 @@ define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( @llvm.riscv.vluxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]], i64 1) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_loadN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: [[A:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]], i64 1) ; CHECK-NEXT: ret [[A]] ; entry: @@ -1377,7 +1497,31 @@ define void 
@intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( %0, < ; CHECK-LABEL: @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], i64 [[TMP3:%.*]]) +; CHECK-NEXT: [[TMP5:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i64 [[TMP3:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP16:%.*]] +; CHECK: 8: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP9]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP8]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ] +; CHECK-NEXT: [[TMP11:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP15]] +; CHECK: 12: +; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[TMP6]], i64 [[IV]] +; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP14]], i64 4) +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP10]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], i64 [[TMP3]]) ; CHECK-NEXT: ret void ; entry: @@ -1401,7 +1545,31 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-LABEL: @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 
-; CHECK-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]]) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]] +; CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]]) ; CHECK-NEXT: ret void ; entry: @@ -1425,7 +1593,31 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( %0, < ; CHECK-LABEL: @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], i64 [[TMP3:%.*]]) +; CHECK-NEXT: [[TMP5:%.*]] = zext [[TMP2:%.*]] to +; 
CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i64 [[TMP3:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP16:%.*]] +; CHECK: 8: +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP9]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP8]] ], [ [[IV_NEXT:%.*]], [[TMP15:%.*]] ] +; CHECK-NEXT: [[TMP11:%.*]] = extractelement splat (i1 true), i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP11]], label [[TMP12:%.*]], label [[TMP15]] +; CHECK: 12: +; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[TMP6]], i64 [[IV]] +; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP14]], i64 4) +; CHECK-NEXT: br label [[TMP15]] +; CHECK: 15: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP10]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], i64 [[TMP3]]) ; CHECK-NEXT: ret void ; entry: @@ -1449,7 +1641,31 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-LABEL: @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr @__asan_shadow_memory_dynamic_address, align 8 -; CHECK-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1:%.*]], [[TMP2:%.*]], [[TMP3:%.*]], i64 [[TMP4:%.*]]) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[TMP2:%.*]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP1:%.*]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp ne i64 [[TMP4:%.*]], 0 +; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9:%.*]], label [[TMP17:%.*]] +; 
CHECK: 9: +; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP4]], i64 [[TMP10]]) +; CHECK-NEXT: br label [[DOTSPLIT:%.*]] +; CHECK: .split: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP9]] ], [ [[IV_NEXT:%.*]], [[TMP16:%.*]] ] +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP3:%.*]], i64 [[IV]] +; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13:%.*]], label [[TMP16]] +; CHECK: 13: +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP7]], i64 [[IV]] +; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP14]] to i64 +; CHECK-NEXT: call void @__asan_storeN(i64 [[TMP15]], i64 4) +; CHECK-NEXT: br label [[TMP16]] +; CHECK: 16: +; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]] +; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]] +; CHECK: .split.split: +; CHECK-NEXT: br label [[TMP17]] +; CHECK: 17: +; CHECK-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.p0.nxv1i16.i64( [[TMP0:%.*]], ptr [[TMP1]], [[TMP2]], [[TMP3]], i64 [[TMP4]]) ; CHECK-NEXT: ret void ; entry: diff --git a/llvm/test/ObjectYAML/DXContainer/RootSignature-StaticSamplers1.3.yaml b/llvm/test/ObjectYAML/DXContainer/RootSignature-StaticSamplers1.3.yaml new file mode 100644 index 0000000000000..1623b05def009 --- /dev/null +++ b/llvm/test/ObjectYAML/DXContainer/RootSignature-StaticSamplers1.3.yaml @@ -0,0 +1,65 @@ +# RUN: yaml2obj %s | obj2yaml | FileCheck %s + +--- !dxcontainer +Header: + Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ] + Version: + Major: 1 + Minor: 0 + PartCount: 1 + PartOffsets: [ 60 ] +Parts: + - Name: RTS0 + Size: 80 + RootSignature: + Version: 3 + NumRootParameters: 0 + RootParametersOffset: 24 + NumStaticSamplers: 1 + StaticSamplersOffset: 24 + Parameters: [] + Samplers: + - Filter: MinLinearMagMipPoint + AddressU: Wrap + AddressV: Mirror + AddressW: MirrorOnce + MipLODBias: 1.23 
+ MaxAnisotropy: 20 + ComparisonFunc: LessEqual + BorderColor: TransparentBlack + MinLOD: 4.56 + MaxLOD: 8.90 + ShaderRegister: 31 + RegisterSpace: 32 + ShaderVisibility: Mesh + SAMPLER_FLAG_UINT_BORDER_COLOR: true + AllowInputAssemblerInputLayout: true + DenyGeometryShaderRootAccess: true + +#CHECK: - Name: RTS0 +#CHECK-NEXT: Size: 80 +#CHECK-NEXT: RootSignature: +#CHECK-NEXT: Version: 3 +#CHECK-NEXT: NumRootParameters: 0 +#CHECK-NEXT: RootParametersOffset: 24 +#CHECK-NEXT: NumStaticSamplers: 1 +#CHECK-NEXT: StaticSamplersOffset: 24 +#CHECK-NEXT: Parameters: [] +#CHECK-NEXT: Samplers: +#CHECK-NEXT: - Filter: MinLinearMagMipPoint +#CHECK-NEXT: AddressU: Wrap +#CHECK-NEXT: AddressV: Mirror +#CHECK-NEXT: AddressW: MirrorOnce +#CHECK-NEXT: MipLODBias: 1.23 +#CHECK-NEXT: MaxAnisotropy: 20 +#CHECK-NEXT: ComparisonFunc: LessEqual +#CHECK-NEXT: BorderColor: TransparentBlack +#CHECK-NEXT: MinLOD: 4.56 +#CHECK-NEXT: MaxLOD: 8.9 +#CHECK-NEXT: ShaderRegister: 31 +#CHECK-NEXT: RegisterSpace: 32 +#CHECK-NEXT: ShaderVisibility: Mesh +#CHECK-NEXT: SAMPLER_FLAG_UINT_BORDER_COLOR: true +#CHECK-NEXT: AllowInputAssemblerInputLayout: true +#CHECK-NEXT: DenyGeometryShaderRootAccess: true diff --git a/llvm/test/TableGen/intrinsic-struct.td b/llvm/test/TableGen/intrinsic-struct.td index 467fd9057c183..032cdc10e74ed 100644 --- a/llvm/test/TableGen/intrinsic-struct.td +++ b/llvm/test/TableGen/intrinsic-struct.td @@ -1,22 +1,58 @@ // RUN: llvm-tblgen -gen-intrinsic-enums -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS | FileCheck %s --check-prefix=CHECK-ENUM -// RUN: llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS > /dev/null 2>&1 +// RUN: llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS | FileCheck %s --check-prefix=CHECK-IMPL // RUN: not llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS -DENABLE_ERROR 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR // XFAIL: vg_leak 
include "llvm/IR/Intrinsics.td" -// Make sure we can return up to 9 values. -// CHECK-ENUM: returns_9_results = {{[0-9]+}}, // llvm.returns.9.results -def int_returns_9_results : Intrinsic< - !listsplat(llvm_anyint_ty, 9), - [], [], "llvm.returns.9.results">; +// Make sure we can return up to 257 values. Intrinsics are in alphabetical order. +// CHECK-ENUM: returns_a0_results = {{[0-9]+}}, // llvm.returns.a0.results +// CHECK-ENUM: returns_b1_results, // llvm.returns.b1.results +// CHECK-ENUM: returns_c2_results, // llvm.returns.c2.results +// CHECK-ENUM: returns_d9_results, // llvm.returns.d9.results +// CHECK-ENUM: returns_e10_results, // llvm.returns.e10.results +// CHECK-ENUM: returns_f257_results, // llvm.returns.f257.results -#ifdef ENABLE_ERROR -// CHECK-ERROR: error: intrinsics can only return upto 9 values, 'int_returns_10_results' returns 10 values -// CHECK-ERROR-NEXT: def int_returns_10_results : Intrinsic< -def int_returns_10_results : Intrinsic< +// Make sure the encoding table is correctly generated. 
+// CHECK-IMPL: IIT_LongEncodingTable +// CHECK-IMPL-NEXT: 21, 255 +// CHECK-IMPL-SAME: 15, 1, 15, 9, 15, 17, 15, 25, 15, 33, 15, 41, 15, 49, 15, 57, 15, 65, 15, 73, 15, 81, +// CHECK-IMPL-NEXT: 21, 0 +// CHECK-IMPL-SAME: 15, 1, 15, 9, 0 +// CHECK-IMPL-NEXT: 21, 7 +// CHECK-IMPL-SAME: 15, 1, 15, 9, 15, 17, 15, 25, 15, 33, 15, 41, 15, 49, 15, 57, 15, 65, 0 +// CHECK-IMPL-NEXT: 21, 8 +// CHECK-IMPL-SAME: 15, 1, 15, 9, 15, 17, 15, 25, 15, 33, 15, 41, 15, 49, 15, 57, 15, 65, 15, 73, 0 +def int_returns_a0_results : Intrinsic< + [], + [], [], "llvm.returns.a0.results">; + +def int_returns_b1_results : Intrinsic< + [llvm_anyint_ty], + [], [], "llvm.returns.b1.results">; + +def int_returns_c2_results : Intrinsic< + !listsplat(llvm_anyint_ty, 2), + [], [], "llvm.returns.c2.results">; + +def int_returns_d9_results : Intrinsic< + !listsplat(llvm_anyint_ty, 9), + [], [], "llvm.returns.d9.results">; + +def int_returns_e10_results : Intrinsic< !listsplat(llvm_anyint_ty, 10), - [], [], "llvm.returns.10.results">; + [], [], "llvm.returns.e10.results">; + +def int_returns_f257_results : Intrinsic< + !listsplat(llvm_anyint_ty, 257), + [], [], "llvm.returns.f257.results">; + +#ifdef ENABLE_ERROR +// CHECK-ERROR: error: intrinsics can only return upto 257 values, 'int_returns_g258_results' returns 258 values +// CHECK-ERROR-NEXT: def int_returns_g258_results : Intrinsic< +def int_returns_g258_results : Intrinsic< + !listsplat(llvm_anyint_ty, 258), + [], [], "llvm.returns.g258.results">; #endif diff --git a/llvm/test/TableGen/intrinsic-varargs.td b/llvm/test/TableGen/intrinsic-varargs.td index 3634e16e20565..f94e1d0d6750e 100644 --- a/llvm/test/TableGen/intrinsic-varargs.td +++ b/llvm/test/TableGen/intrinsic-varargs.td @@ -3,5 +3,5 @@ include "llvm/IR/Intrinsics.td" -// CHECK: /* 0 */ 0, 29, 0, +// CHECK: /* 0 */ 0, 26, 0, def int_foo : Intrinsic<[], [llvm_vararg_ty]>; diff --git a/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll 
b/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll index e2a9b4eea2c7d..8a6f60ba7a204 100644 --- a/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll +++ b/llvm/test/Transforms/DropUnnecessaryAssumes/basic.ll @@ -1,6 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 ; RUN: opt -S -passes=drop-unnecessary-assumes < %s | FileCheck %s +declare void @use(i32 %x) +declare i32 @get() + define void @basic_dead(i32 %x) { ; CHECK-LABEL: define void @basic_dead( ; CHECK-SAME: i32 [[X:%.*]]) { @@ -180,3 +183,136 @@ define void @type_test(ptr %x) { call void @llvm.assume(i1 %test) ret void } + +define void @multiple_dead_conds(i32 %x) { +; CHECK-LABEL: define void @multiple_dead_conds( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: ret void +; + %cond1 = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond1) + %cond2 = icmp ne i32 %x, 64 + call void @llvm.assume(i1 %cond2) + ret void +} + +define void @multiple_dead_bundles(ptr %x) { +; CHECK-LABEL: define void @multiple_dead_bundles( +; CHECK-SAME: ptr [[X:%.*]]) { +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr %x, i64 8), "nonnull"(ptr %x)] + ret void +} + +; The assume is eliminated, but currently leaves behind a dead cycle. 
+define void @dead_cycle(i1 %loop.cond) { +; CHECK-LABEL: define void @dead_cycle( +; CHECK-SAME: i1 [[LOOP_COND:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 +; CHECK-NEXT: br i1 [[LOOP_COND]], label %[[LOOP]], label %[[EXIT:.*]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ] + %cond = icmp ne i32 %iv, 64 + call void @llvm.assume(i1 %cond) + %iv.next = add i32 %iv, 1 + br i1 %loop.cond, label %loop, label %exit + +exit: + ret void +} + +define void @use_in_side_effect(i32 %x) { +; CHECK-LABEL: define void @use_in_side_effect( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[COND:%.*]] = icmp sge i32 [[X]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[COND]]) +; CHECK-NEXT: call void @use(i32 [[X]]) +; CHECK-NEXT: ret void +; + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + call void @use(i32 %x) + ret void +} + +define void @indirect_use_in_side_effect(i32 %x) { +; CHECK-LABEL: define void @indirect_use_in_side_effect( +; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-NEXT: [[COND:%.*]] = icmp sge i32 [[X]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[COND]]) +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[X]], 1 +; CHECK-NEXT: call void @use(i32 [[ADD]]) +; CHECK-NEXT: ret void +; + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + %add = add i32 %x, 1 + call void @use(i32 %add) + ret void +} + +; The affected value itself has a side effect, but we can still drop the +; assume. 
+define void @affected_value_has_side_effect() { +; CHECK-LABEL: define void @affected_value_has_side_effect() { +; CHECK-NEXT: [[X:%.*]] = call i32 @get() +; CHECK-NEXT: ret void +; + %x = call i32 @get() + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + ret void +} + +define i32 @affected_value_has_side_effect_and_is_used() { +; CHECK-LABEL: define i32 @affected_value_has_side_effect_and_is_used() { +; CHECK-NEXT: [[X:%.*]] = call i32 @get() +; CHECK-NEXT: [[COND:%.*]] = icmp sge i32 [[X]], 0 +; CHECK-NEXT: call void @llvm.assume(i1 [[COND]]) +; CHECK-NEXT: ret i32 [[X]] +; + %x = call i32 @get() + %cond = icmp sge i32 %x, 0 + call void @llvm.assume(i1 %cond) + ret i32 %x +} + +@g = external global i8 +@g2 = external global i8 + +; Assumes on globals are currently not supported. +define void @assume_on_global() { +; CHECK-LABEL: define void @assume_on_global() { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr @g, i64 8) ] +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr @g, i64 8)] + ret void +} + +define void @assume_on_global_used_in_other_func() { +; CHECK-LABEL: define void @assume_on_global_used_in_other_func() { +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr @g2, i64 8) ] +; CHECK-NEXT: ret void +; + call void @llvm.assume(i1 true) ["align"(ptr @g2, i64 8)] + ret void +} + +define ptr @other_func() { +; CHECK-LABEL: define ptr @other_func() { +; CHECK-NEXT: ret ptr @g2 +; + ret ptr @g2 +} diff --git a/llvm/test/Transforms/InstCombine/fsh.ll b/llvm/test/Transforms/InstCombine/fsh.ll index 0325c60997dfd..28c541e1a9eb2 100644 --- a/llvm/test/Transforms/InstCombine/fsh.ll +++ b/llvm/test/Transforms/InstCombine/fsh.ll @@ -1214,3 +1214,75 @@ define i31 @fshr_neg_amount_non_power_two(i31 %x, i31 %y) { %r = call i31 @llvm.fshr.i31(i31 %x, i31 %x, i31 %n) ret i31 %r } + +define i32 @rot_const_consecutive(i32 %x) { +; CHECK-LABEL: @rot_const_consecutive( +; CHECK-NEXT: [[R2:%.*]] = call i32 
@llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 8) +; CHECK-NEXT: ret i32 [[R2]] +; + %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 13) + %r2 = call i32 @llvm.fshl.i32(i32 %r, i32 %r, i32 27) + ret i32 %r2 +} + +define i32 @rot_const_consecutive_multi_use(i32 %x) { +; CHECK-LABEL: @rot_const_consecutive_multi_use( +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 7) +; CHECK-NEXT: [[R3:%.*]] = call i32 @llvm.fshl.i32(i32 [[X]], i32 [[X]], i32 11) +; CHECK-NEXT: [[R2:%.*]] = and i32 [[R]], [[R3]] +; CHECK-NEXT: ret i32 [[R2]] +; + %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 7) + %r2 = call i32 @llvm.fshl.i32(i32 %r, i32 %r, i32 4) + %and = and i32 %r, %r2 + ret i32 %and +} + +define i32 @rot_const_consecutive_cancel_out(i32 %x) { +; CHECK-LABEL: @rot_const_consecutive_cancel_out( +; CHECK-NEXT: ret i32 [[X:%.*]] +; + %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 7) + %r2 = call i32 @llvm.fshl.i32(i32 %r, i32 %r, i32 25) + ret i32 %r2 +} + +;; negative test, consecutive rotates only fold if shift amounts are const + +define i32 @rot_nonconst_shift(i32 %x, i32 %amt) { +; CHECK-LABEL: @rot_nonconst_shift( +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 7) +; CHECK-NEXT: [[R2:%.*]] = call i32 @llvm.fshl.i32(i32 [[R]], i32 [[R]], i32 [[AMT:%.*]]) +; CHECK-NEXT: ret i32 [[R2]] +; + %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 7) + %r2 = call i32 @llvm.fshl.i32(i32 %r, i32 %r, i32 %amt) + ret i32 %r2 +} + +;; negative test, 1st funnel shift isn't a rotate. 
+ +define i32 @fsh_rot(i32 %x, i32 %y) { +; CHECK-LABEL: @fsh_rot( +; CHECK-NEXT: [[FSH:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[Y:%.*]], i32 7) +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[FSH]], i32 [[FSH]], i32 4) +; CHECK-NEXT: ret i32 [[R]] +; + %fsh = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 7) + %r = call i32 @llvm.fshl.i32(i32 %fsh, i32 %fsh, i32 4) + ret i32 %r +} + +;; negative test, 2nd funnel shift isn't a rotate. + +define i32 @rot_fsh(i32 %x, i32 %y) { +; CHECK-LABEL: @rot_fsh( +; CHECK-NEXT: [[Y:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 7) +; CHECK-NEXT: [[R2:%.*]] = call i32 @llvm.fshl.i32(i32 [[Y]], i32 [[R:%.*]], i32 4) +; CHECK-NEXT: ret i32 [[R2]] +; + %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 7) + %r2 = call i32 @llvm.fshl.i32(i32 %r, i32 %y, i32 4) + ret i32 %r2 +} + diff --git a/llvm/test/Transforms/InstCombine/vector-reductions.ll b/llvm/test/Transforms/InstCombine/vector-reductions.ll index 10f4aca72dbc7..f1e0dd9bd06d7 100644 --- a/llvm/test/Transforms/InstCombine/vector-reductions.ll +++ b/llvm/test/Transforms/InstCombine/vector-reductions.ll @@ -308,3 +308,174 @@ define i32 @diff_of_sums_type_mismatch2(<8 x i32> %v0, <4 x i32> %v1) { %r = sub i32 %r0, %r1 ret i32 %r } + +define i32 @constant_multiplied_4xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_4xi32( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 2 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <4 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %3) + ret i32 %4 +} + +define i32 @constant_multiplied_3xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_3xi32( +; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[TMP0:%.*]], 3 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <3 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <3 x i32> %2, <3 x i32> poison, <3 x i32> zeroinitializer + %4 = tail call 
i32 @llvm.vector.reduce.add.v3i32(<3 x i32> %3) + ret i32 %4 +} + +define i64 @constant_multiplied_4xi64(i64 %0) { +; CHECK-LABEL: @constant_multiplied_4xi64( +; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP0:%.*]], 2 +; CHECK-NEXT: ret i64 [[TMP2]] +; + %2 = insertelement <4 x i64> poison, i64 %0, i64 0 + %3 = shufflevector <4 x i64> %2, <4 x i64> poison, <4 x i32> zeroinitializer + %4 = tail call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %3) + ret i64 %4 +} + +define i32 @constant_multiplied_8xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_8xi32( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 3 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <8 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %3) + ret i32 %4 +} + + +define i32 @constant_multiplied_16xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_16xi32( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 4 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <16 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3) + ret i32 %4 +} + + +define i32 @constant_multiplied_4xi32_at_idx1(i32 %0) { +; CHECK-LABEL: @constant_multiplied_4xi32_at_idx1( +; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP0:%.*]], 2 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 1 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, + <4 x i32> + %4 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %3) + ret i32 %4 +} + +define i32 @negative_constant_multiplied_4xi32(i32 %0) { +; CHECK-LABEL: @negative_constant_multiplied_4xi32( +; CHECK-NEXT: ret i32 poison +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 1 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <4 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x 
i32> %3) + ret i32 %4 +} + +define i32 @constant_multiplied_6xi32(i32 %0) { +; CHECK-LABEL: @constant_multiplied_6xi32( +; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[TMP0:%.*]], 6 +; CHECK-NEXT: ret i32 [[TMP2]] +; + %2 = insertelement <4 x i32> poison, i32 %0, i64 0 + %3 = shufflevector <4 x i32> %2, <4 x i32> poison, <6 x i32> zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.v6i32(<6 x i32> %3) + ret i32 %4 +} + +define i64 @constant_multiplied_6xi64(i64 %0) { +; CHECK-LABEL: @constant_multiplied_6xi64( +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP0:%.*]], 6 +; CHECK-NEXT: ret i64 [[TMP2]] +; + %2 = insertelement <4 x i64> poison, i64 %0, i64 0 + %3 = shufflevector <4 x i64> %2, <4 x i64> poison, <6 x i32> zeroinitializer + %4 = tail call i64 @llvm.vector.reduce.add.v6i64(<6 x i64> %3) + ret i64 %4 +} + +define i1 @constant_multiplied_8xi1(i1 %0) { +; CHECK-LABEL: @constant_multiplied_8xi1( +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i1> poison, i1 [[TMP0:%.*]], i64 0 +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i1> [[TMP2]], <8 x i1> poison, <8 x i32> zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i1> [[TMP3]] to i8 +; CHECK-NEXT: [[TMP5:%.*]] = call range(i8 0, 9) i8 @llvm.ctpop.i8(i8 [[TMP4]]) +; CHECK-NEXT: [[TMP6:%.*]] = trunc i8 [[TMP5]] to i1 +; CHECK-NEXT: ret i1 [[TMP6]] +; + %2 = insertelement <8 x i1> poison, i1 %0, i32 0 + %3 = shufflevector <8 x i1> %2, <8 x i1> poison, <8 x i32> zeroinitializer + %4 = tail call i1 @llvm.vector.reduce.add.v8i1(<8 x i1> %3) + ret i1 %4 +} + +define i2 @constant_multiplied_4xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_4xi2( +; CHECK-NEXT: ret i2 0 +; + %2 = insertelement <4 x i2> poison, i2 %0, i32 0 + %3 = shufflevector <4 x i2> %2, <4 x i2> poison, <4 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v4i2(<4 x i2> %3) + ret i2 %4 +} + +define i2 @constant_multiplied_5xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_5xi2( +; CHECK-NEXT: ret i2 [[TMP0:%.*]] +; + %2 = 
insertelement <5 x i2> poison, i2 %0, i64 0 + %3 = shufflevector <5 x i2> %2, <5 x i2> poison, <5 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v5i2(<5 x i2> %3) + ret i2 %4 +} + +define i2 @constant_multiplied_6xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_6xi2( +; CHECK-NEXT: [[TMP2:%.*]] = shl i2 [[TMP0:%.*]], 1 +; CHECK-NEXT: ret i2 [[TMP2]] +; + %2 = insertelement <6 x i2> poison, i2 %0, i64 0 + %3 = shufflevector <6 x i2> %2, <6 x i2> poison, <6 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v6i2(<6 x i2> %3) + ret i2 %4 +} + +define i2 @constant_multiplied_7xi2(i2 %0) { +; CHECK-LABEL: @constant_multiplied_7xi2( +; CHECK-NEXT: [[TMP2:%.*]] = sub i2 0, [[TMP0:%.*]] +; CHECK-NEXT: ret i2 [[TMP2]] +; + %2 = insertelement <7 x i2> poison, i2 %0, i64 0 + %3 = shufflevector <7 x i2> %2, <7 x i2> poison, <7 x i32> zeroinitializer + %4 = tail call i2 @llvm.vector.reduce.add.v7i2(<7 x i2> %3) + ret i2 %4 +} + +define i32 @negative_scalable_vector(i32 %0) { +; CHECK-LABEL: @negative_scalable_vector( +; CHECK-NEXT: [[TMP2:%.*]] = insertelement poison, i32 [[TMP0:%.*]], i64 0 +; CHECK-NEXT: [[TMP3:%.*]] = shufflevector [[TMP2]], poison, zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = tail call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP3]]) +; CHECK-NEXT: ret i32 [[TMP4]] +; + %2 = insertelement poison, i32 %0, i64 0 + %3 = shufflevector %2, poison, zeroinitializer + %4 = tail call i32 @llvm.vector.reduce.add.nxv4i32( %3) + ret i32 %4 +} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call-scalarize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call-scalarize.ll index b157a2818e676..157b78704234a 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call-scalarize.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call-scalarize.ll @@ -109,36 +109,35 @@ define void @test_widen_exp_v2(ptr noalias %p2, ptr noalias %p, i64 %n) #5 { ; TFA_INTERLEAVE-NEXT: [[TMP11:%.*]] = insertelement <2 x 
double> poison, double [[TMP9]], i32 0 ; TFA_INTERLEAVE-NEXT: [[TMP12:%.*]] = insertelement <2 x double> [[TMP11]], double [[TMP9]], i32 1 ; TFA_INTERLEAVE-NEXT: [[TMP14:%.*]] = fcmp ogt <2 x double> [[TMP12]], zeroinitializer -; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP14]], <2 x double> zeroinitializer, <2 x double> splat (double 1.000000e+00) ; TFA_INTERLEAVE-NEXT: [[PREDPHI3:%.*]] = select <2 x i1> [[TMP14]], <2 x double> zeroinitializer, <2 x double> splat (double 1.000000e+00) ; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] ; TFA_INTERLEAVE: pred.store.if: -; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[PREDPHI]], i32 0 +; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = extractelement <2 x double> [[PREDPHI3]], i32 0 ; TFA_INTERLEAVE-NEXT: store double [[TMP20]], ptr [[P:%.*]], align 8 ; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE]] ; TFA_INTERLEAVE: pred.store.continue: ; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP29]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]] -; TFA_INTERLEAVE: pred.store.if4: -; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = extractelement <2 x double> [[PREDPHI]], i32 1 +; TFA_INTERLEAVE: pred.store.if3: +; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = extractelement <2 x double> [[PREDPHI3]], i32 1 ; TFA_INTERLEAVE-NEXT: store double [[TMP22]], ptr [[P]], align 8 ; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE5]] -; TFA_INTERLEAVE: pred.store.continue5: +; TFA_INTERLEAVE: pred.store.continue4: ; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK2]], i32 0 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP31]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]] -; TFA_INTERLEAVE: pred.store.if6: +; TFA_INTERLEAVE: pred.store.if5: ; 
TFA_INTERLEAVE-NEXT: [[TMP32:%.*]] = extractelement <2 x double> [[PREDPHI3]], i32 0 ; TFA_INTERLEAVE-NEXT: store double [[TMP32]], ptr [[P]], align 8 ; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE7]] -; TFA_INTERLEAVE: pred.store.continue7: +; TFA_INTERLEAVE: pred.store.continue6: ; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK2]], i32 1 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP25]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9]] -; TFA_INTERLEAVE: pred.store.if8: +; TFA_INTERLEAVE: pred.store.if7: ; TFA_INTERLEAVE-NEXT: [[TMP34:%.*]] = extractelement <2 x double> [[PREDPHI3]], i32 1 ; TFA_INTERLEAVE-NEXT: store double [[TMP34]], ptr [[P]], align 8 ; TFA_INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE9]] -; TFA_INTERLEAVE: pred.store.continue9: +; TFA_INTERLEAVE: pred.store.continue8: ; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = add i64 [[INDEX]], 2 ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX]], i64 [[TMP3]]) diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll index 4a1d56572c013..d8a81f9316e4b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll @@ -960,13 +960,11 @@ define void @test_widen_exp_v2(ptr noalias %p2, ptr noalias %p, i64 %n) #5 { ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = load double, ptr [[P2]], align 8 ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = tail call double @llvm.exp.f64(double [[TMP4]]) #[[ATTR7:[0-9]+]] ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = fcmp ogt double [[TMP6]], 0.000000e+00 -; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP8]], double 0.000000e+00, double 1.000000e+00 ; TFA_INTERLEAVE-NEXT: [[PREDPHI3:%.*]] = select i1 [[TMP8]], double 0.000000e+00, double 1.000000e+00 -; TFA_INTERLEAVE-NEXT: [[SPEC_SELECT:%.*]] = 
select i1 [[ACTIVE_LANE_MASK2]], double [[PREDPHI3]], double [[PREDPHI]] ; TFA_INTERLEAVE-NEXT: [[TMP14:%.*]] = or i1 [[ACTIVE_LANE_MASK]], [[ACTIVE_LANE_MASK2]] ; TFA_INTERLEAVE-NEXT: br i1 [[TMP14]], label %[[BB8:.*]], label %[[TMP9]] ; TFA_INTERLEAVE: [[BB8]]: -; TFA_INTERLEAVE-NEXT: store double [[SPEC_SELECT]], ptr [[P]], align 8 +; TFA_INTERLEAVE-NEXT: store double [[PREDPHI3]], ptr [[P]], align 8 ; TFA_INTERLEAVE-NEXT: br label %[[TMP9]] ; TFA_INTERLEAVE: [[TMP9]]: ; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll index 46ba7f645a03e..6c36dfb81311b 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll @@ -16,7 +16,6 @@ define void @load_store_interleave_group(ptr noalias %data) { ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -24,7 +23,7 @@ define void @load_store_interleave_group(ptr noalias %data) { ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP0]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP1]], align 8 ; CHECK-NEXT: store [[WIDE_LOAD]], ptr [[TMP1]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP8]], label 
%[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: @@ -66,7 +65,6 @@ define void @test_2xi64_unary_op_load_interleave_group(ptr noalias %data, ptr no ; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1111, [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1111, [[N_MOD_VF]] -; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -75,7 +73,7 @@ define void @test_2xi64_unary_op_load_interleave_group(ptr noalias %data, ptr no ; CHECK-NEXT: [[TMP7:%.*]] = load , ptr [[TMP1]], align 8 ; CHECK-NEXT: [[TMP9:%.*]] = fneg [[TMP7]] ; CHECK-NEXT: store [[TMP9]], ptr [[TMP1]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll index 658b9a4569191..1540baab53719 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll @@ -1679,8 +1679,7 @@ define i64 @test_std_q31(ptr %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64> ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP2]]) ; CHECK-NEXT: [[TMP4]] = add i64 [[VEC_PHI]], [[TMP3]] -; CHECK-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64> -; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <4 x i64> [[TMP5]], [[TMP5]] +; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <4 x i64> [[TMP2]], [[TMP2]] ; CHECK-NEXT: [[TMP7:%.*]] = call i64 
@llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]]) ; CHECK-NEXT: [[TMP8]] = add i64 [[VEC_PHI1]], [[TMP7]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll index fde18785ce2c4..612e7c083bda1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll @@ -27,8 +27,7 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 { ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP20]], splat (i1 true), i32 [[TMP12]]) -; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] @@ -101,8 +100,7 @@ define void @block_with_dead_inst_2(ptr %src) #0 { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP10]], splat (i1 true), i32 [[TMP9]]) -; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: 
br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] @@ -175,8 +173,7 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP10]], splat (i1 true), i32 [[TMP9]]) -; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] @@ -259,8 +256,7 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 { ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i16, ptr [[SRC]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP20]], splat (i1 true), i32 [[TMP12]]) -; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP12]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] @@ -345,8 +341,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[SRC]], [[VEC_IND]] ; CHECK-NEXT: call void 
@llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP10]], splat (i1 true), i32 [[TMP9]]) -; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] @@ -449,8 +444,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 % ; CHECK-NEXT: [[TMP23:%.*]] = select [[TMP17]], [[BROADCAST_SPLAT]], zeroinitializer ; CHECK-NEXT: [[TMP24:%.*]] = or [[TMP22]], [[TMP23]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP20]], [[TMP24]], i32 [[TMP27]]) -; CHECK-NEXT: [[TMP25:%.*]] = zext i32 [[TMP27]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] @@ -708,8 +702,7 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[DST]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( zeroinitializer, align 4 [[TMP21]], splat (i1 true), i32 [[TMP18]]), !alias.scope [[META10:![0-9]+]], !noalias [[META13:![0-9]+]] -; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP18]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; 
CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll index 43fef428372dd..96c3a0d1a2f01 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll @@ -36,8 +36,7 @@ define void @dead_load(ptr %p, i16 %start) { ; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector [[DOTSPLATINSERT1]], poison, zeroinitializer ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[P]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP21]], splat (i1 true), i32 [[TMP16]]) -; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP16]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT2]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] @@ -321,8 +320,7 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) { ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer ; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[DST]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP19]], align 4 [[TMP16]], splat (i1 true), i32 [[TMP8]]) -; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP8]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label 
%[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] @@ -403,9 +401,8 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s ; CHECK-NEXT: [[TMP18:%.*]] = zext [[TMP17]] to ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[DST]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP18]], align 4 [[TMP19]], splat (i1 true), i32 [[TMP10]]) -; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64 -; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]] -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll index 663ead8c13cbd..0a605563e45a9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll @@ -24,9 +24,8 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]] ; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[VEC_IND]], ptr align 8 [[TMP14]], splat (i1 true), i32 [[TMP11]]) -; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP11]] to i64 -; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]] -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP12]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; 
CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] @@ -80,8 +79,7 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) { ; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP7]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = mul i64 8, [[TMP10]] +; CHECK-NEXT: [[TMP11:%.*]] = mul i64 8, [[TMP9]] ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP11]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll index ea2ccb07b388b..5df4f703c1b1b 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll @@ -111,8 +111,7 @@ define void @predicated_strided_store(ptr %start) { ; RVA23-NEXT: [[TMP4:%.*]] = mul [[VEC_IND]], splat (i64 7) ; RVA23-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[START:%.*]], [[TMP4]] ; RVA23-NEXT: call void @llvm.vp.scatter.nxv8i8.nxv8p0( zeroinitializer, align 1 [[TMP5]], splat (i1 true), i32 [[TMP2]]) -; RVA23-NEXT: [[TMP6:%.*]] = zext i32 [[TMP2]] to i64 -; RVA23-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP6]] +; RVA23-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]] ; RVA23-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; RVA23-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RVA23-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] @@ -149,8 
+148,7 @@ define void @predicated_strided_store(ptr %start) { ; RVA23ZVL1024B-NEXT: [[TMP4:%.*]] = mul [[VEC_IND]], splat (i64 7) ; RVA23ZVL1024B-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[START:%.*]], [[TMP4]] ; RVA23ZVL1024B-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0( zeroinitializer, align 1 [[TMP5]], splat (i1 true), i32 [[TMP2]]) -; RVA23ZVL1024B-NEXT: [[TMP6:%.*]] = zext i32 [[TMP2]] to i64 -; RVA23ZVL1024B-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP6]] +; RVA23ZVL1024B-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]] ; RVA23ZVL1024B-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; RVA23ZVL1024B-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RVA23ZVL1024B-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] @@ -213,8 +211,7 @@ define void @store_to_addr_generated_from_invariant_addr(ptr noalias %p0, ptr no ; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i32.nxv2p0( zeroinitializer, align 4 [[TMP7]], splat (i1 true), i32 [[TMP3]]) ; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i32.nxv2p0( zeroinitializer, align 4 [[TMP7]], splat (i1 true), i32 [[TMP3]]) ; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0( zeroinitializer, align 1 [[TMP7]], splat (i1 true), i32 [[TMP3]]) -; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP3]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP4]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll index d3db55e251870..4d97a659e94e9 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll @@ 
-69,8 +69,7 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 { ; CHECK-NEXT: [[DOTSPLAT25:%.*]] = shufflevector [[DOTSPLATINSERT24]], poison, zeroinitializer ; CHECK-NEXT: [[TMP59:%.*]] = getelementptr i16, ptr [[A]], [[VEC_IND]] ; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 [[TMP59]], splat (i1 true), i32 [[TMP27]]), !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]] -; CHECK-NEXT: [[TMP47:%.*]] = zext i32 [[TMP27]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP47]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT25]] ; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] @@ -120,7 +119,172 @@ exit: ret void } +define void @test_3_inductions(ptr noalias %dst, ptr noalias %src, i64 %n) #1 { +; CHECK-LABEL: define void @test_3_inductions( +; CHECK-SAME: ptr noalias [[DST:%.*]], ptr noalias [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1 +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[DST]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv2i32() +; CHECK-NEXT: [[TMP2:%.*]] = mul [[TMP1]], splat (i32 2) +; CHECK-NEXT: [[INDUCTION:%.*]] = add splat (i32 1), [[TMP2]] +; CHECK-NEXT: [[INDUCTION1:%.*]] = add zeroinitializer, [[TMP2]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND2:%.*]] = phi [ [[INDUCTION1]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT5:%.*]], %[[VECTOR_BODY]] ] +; 
CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true) +; CHECK-NEXT: [[TMP4:%.*]] = mul i32 2, [[TMP3]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement poison, i32 [[TMP4]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector [[BROADCAST_SPLATINSERT3]], poison, zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = or [[VEC_IND2]], [[VEC_IND]] +; CHECK-NEXT: [[TMP6:%.*]] = sext [[TMP5]] to +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], [[TMP6]] +; CHECK-NEXT: call void @llvm.vp.scatter.nxv2p0.nxv2p0( [[TMP7]], align 8 [[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP3]] to i64 +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT4]] +; CHECK-NEXT: [[VEC_IND_NEXT5]] = add [[VEC_IND2]], [[BROADCAST_SPLAT4]] +; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[SCALAR_PH:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV_0:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[IV_0_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_OR:%.*]] = or i32 [[IV_2]], [[IV_0]] +; CHECK-NEXT: [[IV_OR_EXT:%.*]] = sext i32 [[IV_OR]] to i64 +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV_OR_EXT]] +; CHECK-NEXT: store ptr [[GEP_SRC]], ptr [[DST]], align 8 +; CHECK-NEXT: [[IV_0_NEXT]] = add i32 [[IV_0]], 2 +; CHECK-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], 1 +; 
CHECK-NEXT: [[IV_2_NEXT]] = add i32 [[IV_2]], 2 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_1]], [[N]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop + +loop: + %iv.0 = phi i32 [ 1, %entry ], [ %iv.0.next, %loop ] + %iv.1 = phi i64 [ 0, %entry ], [ %iv.1.next, %loop ] + %iv.2 = phi i32 [ 0, %entry ], [ %iv.2.next, %loop ] + %iv.or = or i32 %iv.2, %iv.0 + %iv.or.ext = sext i32 %iv.or to i64 + %gep.src = getelementptr i8, ptr %src, i64 %iv.or.ext + store ptr %gep.src, ptr %dst, align 8 + %iv.0.next = add i32 %iv.0, 2 + %iv.1.next = add i64 %iv.1, 1 + %iv.2.next = add i32 %iv.2, 2 + %ec = icmp eq i64 %iv.1, %n + br i1 %ec, label %exit, label %loop + +exit: + ret void +} + +define void @redundant_iv_trunc_for_cse(ptr noalias %src, ptr noalias %dst, i64 %n) #0 { +; CHECK-LABEL: define void @redundant_iv_trunc_for_cse( +; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1 +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv4i32() +; CHECK-NEXT: [[TMP2:%.*]] = mul [[TMP1]], splat (i32 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP2]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_IND1:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: 
[[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TMP3]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP4]], splat (i1 true), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP5:%.*]] = icmp eq [[VP_OP_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP6:%.*]] = shl [[VEC_IND1]], splat (i32 16) +; CHECK-NEXT: [[PREDPHI:%.*]] = select [[TMP5]], [[TMP6]], [[VEC_IND]] +; CHECK-NEXT: [[TMP7:%.*]] = trunc [[PREDPHI]] to +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[EVL_BASED_IV]] +; CHECK-NEXT: call void @llvm.vp.store.nxv4i8.p0( [[TMP7]], ptr align 1 [[TMP8]], splat (i1 true), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP3]] to i64 +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[VEC_IND_NEXT2]] = add [[VEC_IND1]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 +; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK: [[SCALAR_PH:.*]]: +; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] +; CHECK: [[LOOP_HEADER]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4 +; CHECK-NEXT: [[C_0:%.*]] = icmp eq i32 [[L]], 0 +; CHECK-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32 +; CHECK-NEXT: br i1 [[C_0]], label %[[THEN:.*]], label %[[LOOP_LATCH]] +; CHECK: [[THEN]]: +; CHECK-NEXT: [[TRUNC_IV_2:%.*]] 
= trunc i64 [[IV]] to i32 +; CHECK-NEXT: [[SHL_IV:%.*]] = shl i32 [[TRUNC_IV_2]], 16 +; CHECK-NEXT: br label %[[LOOP_LATCH]] +; CHECK: [[LOOP_LATCH]]: +; CHECK-NEXT: [[P:%.*]] = phi i32 [ [[SHL_IV]], %[[THEN]] ], [ [[TRUNC_IV]], %[[LOOP_HEADER]] ] +; CHECK-NEXT: [[TRUNC_P:%.*]] = trunc i32 [[P]] to i8 +; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]] +; CHECK-NEXT: store i8 [[TRUNC_P]], ptr [[GEP_DST]], align 1 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] +; CHECK: [[EXIT]]: +; CHECK-NEXT: ret void +; +entry: + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %gep.src = getelementptr inbounds i32, ptr %src, i64 %iv + %l = load i32, ptr %gep.src + %c.0 = icmp eq i32 %l, 0 + %trunc.iv = trunc i64 %iv to i32 + br i1 %c.0, label %then, label %loop.latch + +then: + %trunc.iv.2 = trunc i64 %iv to i32 + %shl.iv = shl i32 %trunc.iv.2, 16 + br label %loop.latch + +loop.latch: + %p = phi i32 [ %shl.iv, %then ], [ %trunc.iv, %loop.header ] + %trunc.p = trunc i32 %p to i8 + %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv + store i8 %trunc.p, ptr %gep.dst, align 1 + %iv.next = add i64 %iv, 1 + %ec = icmp eq i64 %iv, %n + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + + + attributes #0 = { "target-features"="+64bit,+v,+zvl256b" } +attributes #1 = { "target-cpu"="sifive-p670" } ;. 
; CHECK: [[META0]] = !{[[META1:![0-9]+]]} ; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]} @@ -132,4 +296,6 @@ attributes #0 = { "target-features"="+64bit,+v,+zvl256b" } ; CHECK: [[META7]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"} ; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META7]]} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META7]], [[META8]]} +; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META7]], [[META8]]} ;. diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll index 86b3a7e32c852..ae6c90c5ce188 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll @@ -34,9 +34,8 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) { ; VLENUNK-NEXT: [[TMP17:%.*]] = add [[PREDPHI]], [[BROADCAST_SPLAT]] ; VLENUNK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]] ; VLENUNK-NEXT: call void @llvm.vp.store.nxv4i32.p0( [[TMP17]], ptr align 4 [[TMP18]], splat (i1 true), i32 [[TMP7]]) -; VLENUNK-NEXT: [[TMP19:%.*]] = zext i32 [[TMP7]] to i64 -; VLENUNK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP19]], [[INDEX]] -; VLENUNK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]] +; VLENUNK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] +; VLENUNK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] ; VLENUNK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; VLENUNK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; VLENUNK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll index 7d32302abfe24..89819f2be4967 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll +++ 
b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll @@ -52,8 +52,7 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; RV32-NEXT: [[TMP18:%.*]] = fadd [[WIDE_MASKED_GATHER6]], [[TMP17]] ; RV32-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], [[VEC_IND]] ; RV32-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0( [[TMP18]], align 8 [[TMP19]], [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] -; RV32-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64 -; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] +; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; RV32-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; RV32-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV32-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] @@ -121,8 +120,7 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea ; RV64-NEXT: [[TMP18:%.*]] = fadd [[WIDE_MASKED_GATHER6]], [[TMP17]] ; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A]], [[VEC_IND]] ; RV64-NEXT: call void @llvm.vp.scatter.nxv2f64.nxv2p0( [[TMP18]], align 8 [[TMP19]], [[TMP14]], i32 [[TMP10]]), !alias.scope [[META5:![0-9]+]], !noalias [[META7:![0-9]+]] -; RV64-NEXT: [[TMP20:%.*]] = zext i32 [[TMP10]] to i64 -; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] +; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; RV64-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; RV64-NEXT: [[TMP24:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; RV64-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll index d0068e134fd32..782c2f6c24fa4 100644 --- 
a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll @@ -43,9 +43,8 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64 ; CHECK-NEXT: [[PREDPHI:%.*]] = select i1 [[TMP23]], i64 poison, i64 [[INDEX]] ; CHECK-NEXT: [[TMP24:%.*]] = getelementptr i16, ptr [[ARG]], i64 [[PREDPHI]] ; CHECK-NEXT: call void @llvm.vp.store.nxv8i16.p0( zeroinitializer, ptr align 2 [[TMP24]], [[TMP22]], i32 [[TMP25]]) -; CHECK-NEXT: [[TMP26:%.*]] = zext i32 [[TMP25]] to i64 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP26]], [[INDEX]] -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP26]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll index 5876a6bf32848..93c0a7455165b 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/reg-usage-prune-vf.ll @@ -12,11 +12,9 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; CHECK-NEXT: [[TMP0:%.*]] = call @llvm.stepvector.nxv4i64() ; CHECK-NEXT: [[TMP1:%.*]] = mul [[TMP0]], splat (i64 2) ; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP1]] -; CHECK-NEXT: [[TMP2:%.*]] = call @llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[TMP3:%.*]] = mul [[TMP2]], splat (i64 3) +; CHECK-NEXT: [[TMP3:%.*]] = mul [[TMP0]], splat (i64 3) ; CHECK-NEXT: [[INDUCTION1:%.*]] = add zeroinitializer, [[TMP3]] -; CHECK-NEXT: [[TMP4:%.*]] = call 
@llvm.stepvector.nxv4i64() -; CHECK-NEXT: [[TMP5:%.*]] = mul [[TMP4]], splat (i64 4) +; CHECK-NEXT: [[TMP5:%.*]] = mul [[TMP0]], splat (i64 4) ; CHECK-NEXT: [[INDUCTION2:%.*]] = add zeroinitializer, [[TMP5]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: @@ -30,12 +28,10 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; CHECK-NEXT: [[TMP8:%.*]] = mul i64 4, [[TMP7]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[TMP8]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP6]] to i64 -; CHECK-NEXT: [[TMP10:%.*]] = mul i64 3, [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = mul i64 3, [[TMP7]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement poison, i64 [[TMP10]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector [[BROADCAST_SPLATINSERT5]], poison, zeroinitializer -; CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP6]] to i64 -; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP11]] +; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP7]] ; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement poison, i64 [[TMP12]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector [[BROADCAST_SPLATINSERT7]], poison, zeroinitializer ; CHECK-NEXT: [[TMP13:%.*]] = sub [[VEC_IND]], splat (i64 1) @@ -53,9 +49,8 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; CHECK-NEXT: [[INTERLEAVE_EVL:%.*]] = mul nuw nsw i32 [[TMP6]], 3 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call @llvm.vector.interleave3.nxv12i8( [[WIDE_MASKED_GATHER]], [[WIDE_MASKED_GATHER9]], [[WIDE_MASKED_GATHER10]]) ; CHECK-NEXT: call void @llvm.vp.store.nxv12i8.p0( [[INTERLEAVED_VEC]], ptr align 1 [[TMP21]], splat (i1 true), i32 [[INTERLEAVE_EVL]]) -; CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP6]] to i64 -; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[EVL_BASED_IV]] -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 
[[AVL]], [[TMP22]] +; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT8]] ; CHECK-NEXT: [[VEC_IND_NEXT11]] = add [[VEC_IND3]], [[BROADCAST_SPLAT6]] ; CHECK-NEXT: [[VEC_IND_NEXT12]] = add [[VEC_IND4]], [[BROADCAST_SPLAT]] @@ -104,11 +99,9 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; NO-REG-PRESSURE-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.stepvector.nxv8i64() ; NO-REG-PRESSURE-CHECK-NEXT: [[TMP1:%.*]] = mul [[TMP0]], splat (i64 2) ; NO-REG-PRESSURE-CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP1]] -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.stepvector.nxv8i64() -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP3:%.*]] = mul [[TMP2]], splat (i64 3) +; NO-REG-PRESSURE-CHECK-NEXT: [[TMP3:%.*]] = mul [[TMP0]], splat (i64 3) ; NO-REG-PRESSURE-CHECK-NEXT: [[INDUCTION1:%.*]] = add zeroinitializer, [[TMP3]] -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.stepvector.nxv8i64() -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP5:%.*]] = mul [[TMP4]], splat (i64 4) +; NO-REG-PRESSURE-CHECK-NEXT: [[TMP5:%.*]] = mul [[TMP0]], splat (i64 4) ; NO-REG-PRESSURE-CHECK-NEXT: [[INDUCTION2:%.*]] = add zeroinitializer, [[TMP5]] ; NO-REG-PRESSURE-CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; NO-REG-PRESSURE-CHECK: [[VECTOR_BODY]]: @@ -122,12 +115,10 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; NO-REG-PRESSURE-CHECK-NEXT: [[TMP8:%.*]] = mul i64 4, [[TMP7]] ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[TMP8]], i64 0 ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP6]] to i64 -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP10:%.*]] = mul i64 3, [[TMP9]] +; NO-REG-PRESSURE-CHECK-NEXT: [[TMP10:%.*]] = mul i64 3, [[TMP7]] ; 
NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement poison, i64 [[TMP10]], i64 0 ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector [[BROADCAST_SPLATINSERT5]], poison, zeroinitializer -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP11:%.*]] = zext i32 [[TMP6]] to i64 -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP11]] +; NO-REG-PRESSURE-CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP7]] ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement poison, i64 [[TMP12]], i64 0 ; NO-REG-PRESSURE-CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector [[BROADCAST_SPLATINSERT7]], poison, zeroinitializer ; NO-REG-PRESSURE-CHECK-NEXT: [[TMP13:%.*]] = sub [[VEC_IND]], splat (i64 1) @@ -145,9 +136,8 @@ define void @f(ptr noalias %p0, ptr noalias %p1, ptr noalias %p2) { ; NO-REG-PRESSURE-CHECK-NEXT: [[INTERLEAVE_EVL:%.*]] = mul nuw nsw i32 [[TMP6]], 3 ; NO-REG-PRESSURE-CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call @llvm.vector.interleave3.nxv24i8( [[WIDE_MASKED_GATHER]], [[WIDE_MASKED_GATHER9]], [[WIDE_MASKED_GATHER10]]) ; NO-REG-PRESSURE-CHECK-NEXT: call void @llvm.vp.store.nxv24i8.p0( [[INTERLEAVED_VEC]], ptr align 1 [[TMP21]], splat (i1 true), i32 [[INTERLEAVE_EVL]]) -; NO-REG-PRESSURE-CHECK-NEXT: [[TMP22:%.*]] = zext i32 [[TMP6]] to i64 -; NO-REG-PRESSURE-CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[EVL_BASED_IV]] -; NO-REG-PRESSURE-CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]] +; NO-REG-PRESSURE-CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]] +; NO-REG-PRESSURE-CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT8]] ; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND_NEXT11]] = add [[VEC_IND3]], [[BROADCAST_SPLAT6]] ; NO-REG-PRESSURE-CHECK-NEXT: [[VEC_IND_NEXT12]] = add [[VEC_IND4]], [[BROADCAST_SPLAT]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll 
b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll index 4c84913eea23d..ca1c710e614f3 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll @@ -26,8 +26,7 @@ define void @single_constant_stride_int_scaled(ptr %p) { ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[TMP15]], splat (i1 true), i32 [[TMP11]]) ; CHECK-NEXT: [[TMP16:%.*]] = add [[WIDE_MASKED_GATHER]], splat (i32 1) ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP16]], align 4 [[TMP15]], splat (i1 true), i32 [[TMP11]]) -; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] @@ -143,8 +142,7 @@ define void @single_constant_stride_int_iv(ptr %p) { ; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[TMP12]], splat (i1 true), i32 [[TMP7]]) ; CHECK-NEXT: [[TMP13:%.*]] = add [[WIDE_MASKED_GATHER]], splat (i32 1) ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP13]], align 4 [[TMP12]], splat (i1 true), i32 [[TMP7]]) -; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP7]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] +; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] @@ -261,8 +259,7 @@ define void @single_constant_stride_ptr_iv(ptr %p) { ; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP20]], align 4 
[[VECTOR_GEP]], splat (i1 true), i32 [[TMP11]]) ; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64 ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP11]] to i64 -; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP9]] ; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP12]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] @@ -834,8 +831,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-NEXT: [[TMP20:%.*]] = add [[WIDE_MASKED_GATHER]], splat (i32 1) ; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P2]], [[TMP18]] ; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP20]], align 4 [[TMP21]], splat (i1 true), i32 [[TMP43]]), !alias.scope [[META9:![0-9]+]], !noalias [[META6]] -; STRIDED-NEXT: [[TMP46:%.*]] = zext i32 [[TMP43]] to i64 -; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP46]] +; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP44]] ; STRIDED-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; STRIDED-NEXT: [[TMP41:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; STRIDED-NEXT: br i1 [[TMP41]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] @@ -1184,21 +1180,16 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector [[DOTSPLATINSERT9]], poison, zeroinitializer ; STRIDED-NEXT: [[TMP18:%.*]] = mul [[TMP19]], [[DOTSPLAT10]] ; STRIDED-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI11]], [[TMP18]] -; STRIDED-NEXT: [[TMP27:%.*]] = call @llvm.stepvector.nxv4i64() -; STRIDED-NEXT: [[TMP21:%.*]] = mul [[TMP27]], [[DOTSPLAT10]] -; STRIDED-NEXT: [[VECTOR_GEP7:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], [[TMP21]] +; STRIDED-NEXT: 
[[VECTOR_GEP7:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], [[TMP18]] ; STRIDED-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) ; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[VECTOR_GEP7]], splat (i1 true), i32 [[TMP14]]), !alias.scope [[META13:![0-9]+]] ; STRIDED-NEXT: [[TMP30:%.*]] = add [[WIDE_MASKED_GATHER]], splat (i32 1) ; STRIDED-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[TMP30]], align 4 [[VECTOR_GEP]], splat (i1 true), i32 [[TMP14]]), !alias.scope [[META16:![0-9]+]], !noalias [[META13]] ; STRIDED-NEXT: [[TMP16:%.*]] = zext i32 [[TMP14]] to i64 ; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]] -; STRIDED-NEXT: [[TMP20:%.*]] = zext i32 [[TMP14]] to i64 -; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP20]] +; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP16]] ; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP25]] -; STRIDED-NEXT: [[TMP22:%.*]] = zext i32 [[TMP14]] to i64 -; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP22]] -; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP17]] +; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP25]] ; STRIDED-NEXT: [[TMP23:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; STRIDED-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; STRIDED: middle.block: @@ -1270,9 +1261,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-UF2-NEXT: [[BROADCAST_SPLAT11:%.*]] = shufflevector [[BROADCAST_SPLATINSERT10]], poison, zeroinitializer ; STRIDED-UF2-NEXT: [[TMP16:%.*]] = mul [[TMP15]], [[BROADCAST_SPLAT11]] ; STRIDED-UF2-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI9]], [[TMP16]] -; STRIDED-UF2-NEXT: [[TMP17:%.*]] = call @llvm.stepvector.nxv4i64() -; STRIDED-UF2-NEXT: [[TMP18:%.*]] = mul [[TMP17]], 
[[BROADCAST_SPLAT11]] -; STRIDED-UF2-NEXT: [[VECTOR_GEP12:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], [[TMP18]] +; STRIDED-UF2-NEXT: [[VECTOR_GEP12:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], [[TMP16]] ; STRIDED-UF2-NEXT: [[STEP_ADD:%.*]] = getelementptr i8, [[VECTOR_GEP12]], [[TMP14]] ; STRIDED-UF2-NEXT: [[STEP_ADD13:%.*]] = getelementptr i8, [[VECTOR_GEP]], [[TMP14]] ; STRIDED-UF2-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[VECTOR_GEP12]], i32 4, splat (i1 true), poison), !alias.scope [[META15:![0-9]+]] @@ -1284,8 +1273,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) { ; STRIDED-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] ; STRIDED-UF2-NEXT: [[TMP21:%.*]] = mul i64 [[STRIDE]], [[TMP9]] ; STRIDED-UF2-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP21]] -; STRIDED-UF2-NEXT: [[TMP22:%.*]] = mul i64 [[STRIDE]], [[TMP9]] -; STRIDED-UF2-NEXT: [[PTR_IND15]] = getelementptr i8, ptr [[POINTER_PHI9]], i64 [[TMP22]] +; STRIDED-UF2-NEXT: [[PTR_IND15]] = getelementptr i8, ptr [[POINTER_PHI9]], i64 [[TMP21]] ; STRIDED-UF2-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; STRIDED-UF2-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; STRIDED-UF2: middle.block: @@ -1363,9 +1351,8 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) { ; NOSTRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv2i64.nxv2p0( align 8 [[TMP4]], splat (i1 true), i32 [[TMP2]]) ; NOSTRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[EVL_BASED_IV]] ; NOSTRIDED-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP5]], splat (i1 true), i32 [[TMP2]]) -; NOSTRIDED-NEXT: [[TMP6:%.*]] = zext i32 [[TMP2]] to i64 -; NOSTRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP6]], [[EVL_BASED_IV]] -; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP6]] 
+; NOSTRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP3]], [[EVL_BASED_IV]] +; NOSTRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]] ; NOSTRIDED-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; NOSTRIDED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; NOSTRIDED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] @@ -1459,9 +1446,8 @@ define void @constant_stride_reinterpret(ptr noalias %in, ptr noalias %out) { ; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv2i64.nxv2p0( align 8 [[TMP4]], splat (i1 true), i32 [[TMP2]]) ; STRIDED-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i64, ptr [[OUT:%.*]], i64 [[EVL_BASED_IV]] ; STRIDED-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[WIDE_MASKED_GATHER]], ptr align 8 [[TMP5]], splat (i1 true), i32 [[TMP2]]) -; STRIDED-NEXT: [[TMP6:%.*]] = zext i32 [[TMP2]] to i64 -; STRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP6]], [[EVL_BASED_IV]] -; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP6]] +; STRIDED-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP3]], [[EVL_BASED_IV]] +; STRIDED-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]] ; STRIDED-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; STRIDED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; STRIDED-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll index c10bfc32e6e29..6652fefb35d60 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll @@ -1199,9 +1199,8 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) { ; IF-EVL-NEXT: [[TMP15:%.*]] = ptrtoint [[TMP14]] to ; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 
[[EVL_BASED_IV]] ; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[TMP15]], ptr align 8 [[TMP16]], splat (i1 true), i32 [[TMP11]]) -; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64 -; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]] -; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]] +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP12]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll index 55facaa96631e..b153328663184 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll @@ -609,9 +609,8 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) { ; IF-EVL-NEXT: [[TMP15:%.*]] = call @llvm.experimental.vp.splice.nxv2i64( [[VECTOR_RECUR]], [[TMP20]], i32 -1, splat (i1 true), i32 [[PREV_EVL]], i32 [[TMP11]]) ; IF-EVL-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[EVL_BASED_IV]] ; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[TMP15]], ptr align 8 [[TMP9]], splat (i1 true), i32 [[TMP11]]) -; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP11]] to i64 -; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]] -; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]] +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP7]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 
[[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll index a23933c7fb005..7c05f4613b575 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll @@ -153,8 +153,7 @@ define i32 @load_factor_4_with_gap(i64 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[TMP10]], splat (i1 true), i32 [[TMP4]]) ; IF-EVL-NEXT: [[TMP11:%.*]] = add [[TMP9]], [[WIDE_MASKED_GATHER2]] ; IF-EVL-NEXT: [[TMP12]] = call @llvm.vp.merge.nxv4i32( splat (i1 true), [[TMP11]], [[VEC_PHI]], i32 [[TMP4]]) -; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP4]] to i64 -; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] @@ -425,8 +424,7 @@ define i32 @load_factor_4_with_tail_gap(i64 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[TMP10]], splat (i1 true), i32 [[TMP4]]) ; IF-EVL-NEXT: [[TMP11:%.*]] = add [[TMP9]], [[WIDE_MASKED_GATHER2]] ; IF-EVL-NEXT: [[TMP12]] = call @llvm.vp.merge.nxv4i32( splat (i1 true), [[TMP11]], [[VEC_PHI]], i32 [[TMP4]]) -; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP4]] to i64 -; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; 
IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] @@ -696,8 +694,7 @@ define i32 @load_factor_4_reverse(i64 %n, ptr noalias %a) { ; IF-EVL-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[TMP14]], splat (i1 true), i32 [[TMP6]]) ; IF-EVL-NEXT: [[TMP15:%.*]] = add [[TMP13]], [[WIDE_MASKED_GATHER5]] ; IF-EVL-NEXT: [[TMP16]] = call @llvm.vp.merge.nxv4i32( splat (i1 true), [[TMP15]], [[VEC_PHI]], i32 [[TMP6]]) -; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP6]] to i64 -; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]] +; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] ; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index adfecdff8fcc8..9095d6e87ad4f 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -285,9 +285,8 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; SCALABLE-NEXT: [[PREDPHI:%.*]] = select [[TMP10]], [[WIDE_MASKED_GATHER]], zeroinitializer ; SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0( [[PREDPHI]], ptr align 8 [[TMP12]], splat (i1 true), i32 [[TMP17]]) -; SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP17]] to i64 -; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] -; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; SCALABLE-NEXT: 
[[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] @@ -383,9 +382,8 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca ; TF-SCALABLE-NEXT: [[PREDPHI:%.*]] = select [[TMP10]], [[WIDE_MASKED_GATHER]], zeroinitializer ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0( [[PREDPHI]], ptr align 8 [[TMP12]], splat (i1 true), i32 [[TMP7]]) -; TF-SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64 -; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] -; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; TF-SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] @@ -698,9 +696,8 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[VEC_IND]], align 8 [[BROADCAST_SPLAT1]], splat (i1 true), i32 [[TMP7]]) ; SCALABLE-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP10]] ; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[TMP16]], splat (i1 true), i32 [[TMP7]]) -; SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP7]] to i64 -; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[TMP10]] -; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[TMP10]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 
[[AVL]], [[TMP8]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] ; SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] @@ -780,9 +777,8 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[VEC_IND]], align 8 [[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP9]]) ; TF-SCALABLE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT3]], ptr align 8 [[TMP10]], splat (i1 true), i32 [[TMP9]]) -; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64 -; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] -; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] +; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] @@ -843,9 +839,8 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[BROADCAST_SPLAT1]], align 8 [[BROADCAST_SPLAT2]], [[TMP10]], i32 [[TMP7]]) ; SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT1]], ptr align 8 [[TMP12]], splat (i1 true), i32 [[TMP7]]) -; SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64 -; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]] -; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]] +; SCALABLE-NEXT: 
[[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]] +; SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] ; SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; SCALABLE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; SCALABLE-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] @@ -940,9 +935,8 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc ; TF-SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[BROADCAST_SPLAT1]], align 8 [[BROADCAST_SPLAT2]], [[TMP10]], i32 [[TMP9]]) ; TF-SCALABLE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT1]], ptr align 8 [[TMP12]], splat (i1 true), i32 [[TMP9]]) -; TF-SCALABLE-NEXT: [[TMP14:%.*]] = zext i32 [[TMP9]] to i64 -; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]] -; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]] +; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]] +; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]] ; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 ; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll b/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll index f33c8d9e9ae9b..ed288d2f99a0b 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/epilog-vectorization-inductions.ll @@ -145,11 +145,9 @@ define void @test_induction_step_needs_expansion(ptr noalias %j, ptr %k, i64 %l, ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16 ; CHECK-NEXT: [[IND_END:%.*]] = mul 
i16 [[DOTCAST]], [[TMP0]] ; CHECK-NEXT: [[TMP1:%.*]] = mul <16 x i16> splat (i16 16), [[TMP2]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <16 x i16> poison, i16 [[OFF]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <16 x i16> [[BROADCAST_SPLATINSERT2]], <16 x i16> poison, <16 x i32> zeroinitializer -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[TMP0]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[OFF]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i16> [[DOTSPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer -; CHECK-NEXT: [[TMP11:%.*]] = mul <16 x i16> , [[DOTSPLAT]] +; CHECK-NEXT: [[TMP11:%.*]] = mul <16 x i16> , [[TMP2]] ; CHECK-NEXT: [[INDUCTION:%.*]] = add <16 x i16> zeroinitializer, [[TMP11]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -158,10 +156,10 @@ define void @test_induction_step_needs_expansion(ptr noalias %j, ptr %k, i64 %l, ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <16 x i16> [[VEC_IND]], [[TMP1]] ; CHECK-NEXT: [[STEP_ADD_2:%.*]] = add <16 x i16> [[STEP_ADD]], [[TMP1]] ; CHECK-NEXT: [[STEP_ADD_3:%.*]] = add <16 x i16> [[STEP_ADD_2]], [[TMP1]] -; CHECK-NEXT: [[TMP4:%.*]] = sub <16 x i16> [[VEC_IND]], [[BROADCAST_SPLAT3]] -; CHECK-NEXT: [[TMP5:%.*]] = sub <16 x i16> [[STEP_ADD]], [[BROADCAST_SPLAT3]] -; CHECK-NEXT: [[TMP6:%.*]] = sub <16 x i16> [[STEP_ADD_2]], [[BROADCAST_SPLAT3]] -; CHECK-NEXT: [[TMP7:%.*]] = sub <16 x i16> [[STEP_ADD_3]], [[BROADCAST_SPLAT3]] +; CHECK-NEXT: [[TMP4:%.*]] = sub <16 x i16> [[VEC_IND]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP5:%.*]] = sub <16 x i16> [[STEP_ADD]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP6:%.*]] = sub <16 x i16> [[STEP_ADD_2]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP7:%.*]] = sub <16 x i16> [[STEP_ADD_3]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[K:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], 
i32 16 ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i16, ptr [[TMP8]], i32 32 diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll index 4b4103e9806b9..61f07eff768c1 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/induction-step.ll @@ -17,9 +17,7 @@ define i16 @wide_add_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16 ; CHECK-NEXT: [[TMP0:%.*]] = mul i16 [[DOTCAST]], [[O_1]] ; CHECK-NEXT: [[TMP1:%.*]] = mul <4 x i16> splat (i16 4), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[O_1]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i16> [[DOTSPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i16> , [[DOTSPLAT]] +; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i16> , [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i16> zeroinitializer, [[TMP2]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: @@ -89,19 +87,17 @@ define i16 @wide_sub_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16 ; CHECK-NEXT: [[TMP1:%.*]] = mul i16 [[DOTCAST]], [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i16> splat (i16 4), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i16> poison, i16 [[O_1]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT1]], <4 x i16> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[TMP0]], i64 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[O_1]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i16> [[DOTSPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: 
[[TMP3:%.*]] = mul <4 x i16> , [[DOTSPLAT]] +; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i16> , [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i16> zeroinitializer, [[TMP3]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i16> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i16> [[VEC_IND]], [[TMP2]] -; CHECK-NEXT: [[TMP5:%.*]] = sub <4 x i16> [[VEC_IND]], [[BROADCAST_SPLAT2]] -; CHECK-NEXT: [[TMP10:%.*]] = sub <4 x i16> [[STEP_ADD]], [[BROADCAST_SPLAT2]] +; CHECK-NEXT: [[TMP5:%.*]] = sub <4 x i16> [[VEC_IND]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP10:%.*]] = sub <4 x i16> [[STEP_ADD]], [[DOTSPLAT]] ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[DST:%.*]], i64 [[INDEX]] ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[TMP6]], i32 4 ; CHECK-NEXT: store <4 x i16> [[TMP5]], ptr [[TMP6]], align 2 diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll b/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll index bdef894794850..90f3df50153a2 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll @@ -40,13 +40,10 @@ define void @smax_call_uniform(ptr %dst, i64 %x) { ; CHECK-NEXT: br label %[[PRED_UREM_CONTINUE6]] ; CHECK: [[PRED_UREM_CONTINUE6]]: ; CHECK-NEXT: [[TMP13:%.*]] = tail call i64 @llvm.smax.i64(i64 0, i64 0) -; CHECK-NEXT: [[P:%.*]] = select i1 [[C]], i64 1, i64 [[TMP13]] ; CHECK-NEXT: [[PREDPHI7:%.*]] = select i1 [[C]], i64 1, i64 [[TMP13]] -; CHECK-NEXT: [[ADD:%.*]] = add i64 [[P]], 1 ; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[PREDPHI7]], 1 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[DST]], i64 [[ADD]] ; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[DST]], i64 
[[TMP17]] -; CHECK-NEXT: store i64 0, ptr [[GEP]], align 8 +; CHECK-NEXT: store i64 0, ptr [[TMP19]], align 8 ; CHECK-NEXT: store i64 0, ptr [[TMP19]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024 diff --git a/llvm/test/Transforms/LoopVectorize/blend-in-header.ll b/llvm/test/Transforms/LoopVectorize/blend-in-header.ll index 85f72d283a0e4..6f262109f95be 100644 --- a/llvm/test/Transforms/LoopVectorize/blend-in-header.ll +++ b/llvm/test/Transforms/LoopVectorize/blend-in-header.ll @@ -111,8 +111,6 @@ define i64 @invar_cond(i1 %c) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -164,8 +162,6 @@ define i64 @invar_cond_incoming_ops_reordered(i1 %c) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] diff --git a/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll b/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll index ff550da1ae0e1..4af9f4a13b62b 100644 --- a/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll +++ b/llvm/test/Transforms/LoopVectorize/expand-scev-after-invoke.ll @@ -18,9 +18,7 @@ define void 
@test(ptr %dst) personality ptr null { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP0:%.*]] = mul i32 160, [[STEP]] ; CHECK-NEXT: [[TMP1:%.*]] = mul <4 x i32> splat (i32 4), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[STEP]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i32> , [[DOTSPLAT]] +; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i32> , [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> zeroinitializer, [[TMP2]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: diff --git a/llvm/test/Transforms/LoopVectorize/float-induction.ll b/llvm/test/Transforms/LoopVectorize/float-induction.ll index 2b15aae628274..901f67ee676ee 100644 --- a/llvm/test/Transforms/LoopVectorize/float-induction.ll +++ b/llvm/test/Transforms/LoopVectorize/float-induction.ll @@ -85,17 +85,15 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N) ; VEC4_INTERL2: vector.ph: ; VEC4_INTERL2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483640 ; VEC4_INTERL2-NEXT: [[FPINC_INS:%.*]] = insertelement <4 x float> poison, float [[FPINC]], i64 0 +; VEC4_INTERL2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[FPINC_INS]], <4 x float> poison, <4 x i32> zeroinitializer ; VEC4_INTERL2-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = fmul fast float [[FPINC]], [[DOTCAST]] ; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fsub fast float [[INIT:%.*]], [[TMP1]] -; VEC4_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[FPINC_INS]], -; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> 
poison, float [[INIT]], i64 0 -; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[FPINC]], i64 0 +; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLAT]], splat (float 4.000000e+00) +; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 ; VEC4_INTERL2-NEXT: [[DOTSPLAT3:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = fmul fast <4 x float> [[DOTSPLAT3]], -; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fsub fast <4 x float> [[DOTSPLAT]], [[TMP2]] +; VEC4_INTERL2-NEXT: [[TMP7:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLAT]], +; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fsub fast <4 x float> [[DOTSPLAT3]], [[TMP7]] ; VEC4_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]] ; VEC4_INTERL2: vector.body: ; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -332,17 +330,15 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32 ; VEC4_INTERL2: vector.ph: ; VEC4_INTERL2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483640 ; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[FPINC]], i64 0 +; VEC4_INTERL2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer ; VEC4_INTERL2-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = fmul reassoc float [[FPINC]], [[DOTCAST]] ; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fsub reassoc float [[INIT:%.*]], [[TMP1]] -; VEC4_INTERL2-NEXT: [[MUL:%.*]] = fmul reassoc <4 x float> [[DOTSPLATINSERT2]], -; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = shufflevector <4 x float> [[MUL]], <4 x float> poison, <4 x i32> 
zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 -; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <4 x float> poison, float [[FPINC]], i64 0 +; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = fmul reassoc <4 x float> [[BROADCAST_SPLAT]], splat (float 4.000000e+00) +; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 ; VEC4_INTERL2-NEXT: [[DOTSPLAT3:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT1]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = fmul reassoc <4 x float> [[DOTSPLAT3]], -; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fsub reassoc <4 x float> [[DOTSPLAT]], [[TMP2]] +; VEC4_INTERL2-NEXT: [[TMP7:%.*]] = fmul reassoc <4 x float> [[BROADCAST_SPLAT]], +; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fsub reassoc <4 x float> [[DOTSPLAT3]], [[TMP7]] ; VEC4_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]] ; VEC4_INTERL2: vector.body: ; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -834,22 +830,20 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC4_INTERL2: vector.ph: ; VEC4_INTERL2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP1]], 2147483640 ; VEC4_INTERL2-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0 +; VEC4_INTERL2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer ; VEC4_INTERL2-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC4_INTERL2-NEXT: [[TMP2:%.*]] = fmul fast float [[DOTCAST]], -5.000000e-01 ; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP2]], 0x3FB99999A0000000 ; VEC4_INTERL2-NEXT: [[DOTCAST2:%.*]] = uitofp nneg i64 [[N_VEC]] to 
float ; VEC4_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP0]], [[DOTCAST2]] ; VEC4_INTERL2-NEXT: [[IND_END3:%.*]] = fadd fast float [[INIT:%.*]], [[TMP3]] -; VEC4_INTERL2-NEXT: [[TMP19:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLATINSERT2]], -; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = shufflevector <4 x float> [[TMP19]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT2:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0 -; VEC4_INTERL2-NEXT: [[BROADCAST:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT2]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 +; VEC4_INTERL2-NEXT: [[DOTSPLAT5:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLAT]], splat (float 4.000000e+00) +; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0 ; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT6:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0 +; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT6:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 ; VEC4_INTERL2-NEXT: [[DOTSPLAT7:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT6]], <4 x float> poison, <4 x i32> zeroinitializer -; VEC4_INTERL2-NEXT: [[TMP4:%.*]] = fmul fast <4 x float> [[DOTSPLAT7]], -; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fadd fast <4 x float> [[DOTSPLAT]], [[TMP4]] +; VEC4_INTERL2-NEXT: [[TMP19:%.*]] = fmul fast <4 x float> [[BROADCAST_SPLAT]], +; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fadd fast <4 x float> [[DOTSPLAT7]], [[TMP19]] ; VEC4_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]] ; VEC4_INTERL2: vector.body: ; VEC4_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -860,8 +854,8 @@ define void @fp_iv_loop3(float %init, ptr noalias 
nocapture %A, ptr noalias noca ; VEC4_INTERL2-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP6]], i64 16 ; VEC4_INTERL2-NEXT: store <4 x float> [[VEC_IND10]], ptr [[TMP6]], align 4 ; VEC4_INTERL2-NEXT: store <4 x float> [[STEP_ADD11]], ptr [[TMP7]], align 4 -; VEC4_INTERL2-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[VEC_IND10]], [[BROADCAST]] -; VEC4_INTERL2-NEXT: [[TMP9:%.*]] = fadd fast <4 x float> [[STEP_ADD11]], [[BROADCAST]] +; VEC4_INTERL2-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[VEC_IND10]], [[DOTSPLAT]] +; VEC4_INTERL2-NEXT: [[TMP9:%.*]] = fadd fast <4 x float> [[STEP_ADD11]], [[DOTSPLAT]] ; VEC4_INTERL2-NEXT: [[TMP10:%.*]] = fadd fast <4 x float> [[VEC_IND]], splat (float -5.000000e-01) ; VEC4_INTERL2-NEXT: [[TMP11:%.*]] = fadd fast <4 x float> [[VEC_IND]], splat (float -2.500000e+00) ; VEC4_INTERL2-NEXT: [[TMP12:%.*]] = fadd fast <4 x float> [[TMP10]], [[TMP8]] diff --git a/llvm/test/Transforms/LoopVectorize/induction-step.ll b/llvm/test/Transforms/LoopVectorize/induction-step.ll index b3cb3a77467ee..362de0e0bba7a 100644 --- a/llvm/test/Transforms/LoopVectorize/induction-step.ll +++ b/llvm/test/Transforms/LoopVectorize/induction-step.ll @@ -337,8 +337,6 @@ define void @wide_add_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16 ; CHECK-NEXT: [[TMP0:%.*]] = mul i16 [[DOTCAST]], [[O_1]] -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[O_1]], i64 0 -; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer ; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <8 x i16> poison, i16 [[O_1]], i64 0 ; CHECK-NEXT: [[DOTSPLAT1:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT1]], <8 x i16> poison, <8 x i32> zeroinitializer ; CHECK-NEXT: [[TMP1:%.*]] = mul <8 x i16> , [[DOTSPLAT1]] @@ -350,7 +348,7 @@ define void 
@wide_add_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[TMP3:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <8 x i16> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP4:%.*]] = add <8 x i16> [[VEC_IND]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP4:%.*]] = add <8 x i16> [[VEC_IND]], [[DOTSPLAT1]] ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP3]] ; CHECK-NEXT: store <8 x i16> [[TMP4]], ptr [[TMP5]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP3]], 8 @@ -362,11 +360,11 @@ define void @wide_add_induction_step_live_in(ptr %dst, i64 %N, i16 %off) { ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i16 [ [[TMP0]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i16 [ [[TMP0]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] -; CHECK-NEXT: [[IV_2:%.*]] = phi i16 [ [[BC_RESUME_VAL5]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[IV_2:%.*]] = phi i16 [ [[BC_RESUME_VAL3]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[ADD]] = add i16 [[IV_2]], [[O_1]] ; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]] ; CHECK-NEXT: store i16 [[ADD]], ptr [[GEP_DST]], align 2 diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll index 261c336b329fa..60c844c3f6415 100644 --- a/llvm/test/Transforms/LoopVectorize/induction.ll +++ b/llvm/test/Transforms/LoopVectorize/induction.ll @@ -6211,12 +6211,10 @@ define 
void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; UNROLL: vector.ph: ; UNROLL-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -4 ; UNROLL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[STEP]], i64 0 +; UNROLL-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; UNROLL-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 ; UNROLL-NEXT: [[IND_END:%.*]] = mul i32 [[STEP]], [[DOTCAST]] -; UNROLL-NEXT: [[TMP15:%.*]] = shl <2 x i32> [[BROADCAST_SPLATINSERT]], -; UNROLL-NEXT: [[TMP16:%.*]] = shufflevector <2 x i32> [[TMP15]], <2 x i32> poison, <2 x i32> zeroinitializer -; UNROLL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[STEP]], i64 0 -; UNROLL-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer +; UNROLL-NEXT: [[TMP16:%.*]] = shl <2 x i32> [[DOTSPLAT]], splat (i32 1) ; UNROLL-NEXT: [[TMP17:%.*]] = mul nuw <2 x i32> [[DOTSPLAT]], ; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]] ; UNROLL: vector.body: @@ -6293,9 +6291,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; UNROLL-NO-IC-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 ; UNROLL-NO-IC-NEXT: [[IND_END:%.*]] = mul i32 [[DOTCAST]], [[STEP]] ; UNROLL-NO-IC-NEXT: [[TMP17:%.*]] = mul <2 x i32> splat (i32 2), [[BROADCAST_SPLAT]] -; UNROLL-NO-IC-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[STEP]], i64 0 -; UNROLL-NO-IC-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer -; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = mul <2 x i32> , [[DOTSPLAT]] +; UNROLL-NO-IC-NEXT: [[TMP18:%.*]] = mul <2 x i32> , [[BROADCAST_SPLAT]] ; UNROLL-NO-IC-NEXT: [[INDUCTION:%.*]] = add <2 x i32> zeroinitializer, [[TMP18]] ; UNROLL-NO-IC-NEXT: br label [[VECTOR_BODY:%.*]] ; UNROLL-NO-IC: vector.body: @@ -6365,12 +6361,10 @@ define void 
@test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; INTERLEAVE: vector.ph: ; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -8 ; INTERLEAVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[STEP]], i64 0 +; INTERLEAVE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; INTERLEAVE-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 ; INTERLEAVE-NEXT: [[IND_END:%.*]] = mul i32 [[STEP]], [[DOTCAST]] -; INTERLEAVE-NEXT: [[TMP15:%.*]] = shl <4 x i32> [[BROADCAST_SPLATINSERT]], -; INTERLEAVE-NEXT: [[TMP16:%.*]] = shufflevector <4 x i32> [[TMP15]], <4 x i32> poison, <4 x i32> zeroinitializer -; INTERLEAVE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[STEP]], i64 0 -; INTERLEAVE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; INTERLEAVE-NEXT: [[TMP16:%.*]] = shl <4 x i32> [[DOTSPLAT]], splat (i32 2) ; INTERLEAVE-NEXT: [[TMP17:%.*]] = mul <4 x i32> [[DOTSPLAT]], ; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] ; INTERLEAVE: vector.body: diff --git a/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll index 6cdd154f0e00e..8525b3aa5d349 100644 --- a/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll +++ b/llvm/test/Transforms/LoopVectorize/nested-loops-scev-expansion.ll @@ -240,11 +240,9 @@ define void @pr52024(ptr %dst, i16 %N) { ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT]], <2 x i16> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = mul i16 24, [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = mul <2 x i16> splat (i16 2), [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <2 x i16> poison, i16 [[REM_TRUNC]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <2 x i16> 
[[BROADCAST_SPLATINSERT3]], <2 x i16> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <2 x i16> poison, i16 [[TMP4]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <2 x i16> poison, i16 [[REM_TRUNC]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT5]], <2 x i16> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[TMP7:%.*]] = mul <2 x i16> , [[BROADCAST_SPLAT6]] +; CHECK-NEXT: [[TMP7:%.*]] = mul <2 x i16> , [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[INDUCTION:%.*]] = add <2 x i16> zeroinitializer, [[TMP7]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: @@ -252,8 +250,8 @@ define void @pr52024(ptr %dst, i16 %N) { ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i16> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] ; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i16> [[VEC_IND]], [[TMP6]] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 8, [[INDEX]] -; CHECK-NEXT: [[TMP8:%.*]] = sub <2 x i16> [[VEC_IND]], [[BROADCAST_SPLAT4]] -; CHECK-NEXT: [[TMP9:%.*]] = sub <2 x i16> [[STEP_ADD]], [[BROADCAST_SPLAT4]] +; CHECK-NEXT: [[TMP8:%.*]] = sub <2 x i16> [[VEC_IND]], [[BROADCAST_SPLAT6]] +; CHECK-NEXT: [[TMP9:%.*]] = sub <2 x i16> [[STEP_ADD]], [[BROADCAST_SPLAT6]] ; CHECK-NEXT: [[TMP10:%.*]] = zext <2 x i16> [[TMP8]] to <2 x i32> ; CHECK-NEXT: [[TMP11:%.*]] = zext <2 x i16> [[TMP9]] to <2 x i32> ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[DST]], i32 [[OFFSET_IDX]] diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll index f6e8de608645a..f4d4cca0d4220 100644 --- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll +++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll @@ -2675,8 +2675,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read ; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> [[TMP13]], <4 
x i32> zeroinitializer ; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]]) ; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[VEC_PHI]], [[TMP7]] -; CHECK-NEXT: [[TMP18:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32> -; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> [[TMP18]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> [[TMP13]], <4 x i32> zeroinitializer ; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP9]]) ; CHECK-NEXT: [[TMP11]] = add i32 [[TMP8]], [[TMP10]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 @@ -2746,12 +2745,10 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read ; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP28]], <4 x i32> [[TMP30]], <4 x i32> zeroinitializer ; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP12]]) ; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = add i32 [[VEC_PHI1]], [[TMP13]] -; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = zext <4 x i8> [[TMP7]] to <4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = select <4 x i1> [[TMP27]], <4 x i32> [[TMP31]], <4 x i32> zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = select <4 x i1> [[TMP27]], <4 x i32> [[TMP29]], <4 x i32> zeroinitializer ; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP15]]) ; CHECK-INTERLEAVED-NEXT: [[TMP17]] = add i32 [[TMP11]], [[TMP16]] -; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = zext <4 x i8> [[TMP8]] to <4 x i32> -; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = select <4 x i1> [[TMP28]], <4 x i32> [[TMP22]], <4 x i32> zeroinitializer +; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = select <4 x i1> [[TMP28]], <4 x i32> [[TMP30]], <4 x i32> zeroinitializer ; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP18]]) ; CHECK-INTERLEAVED-NEXT: [[TMP20]] = add i32 [[TMP14]], 
[[TMP19]] ; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 diff --git a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll index cb0c778b95026..73d5e26ef82a2 100644 --- a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll +++ b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll @@ -220,14 +220,18 @@ define void @expand_diff_scev_unknown(ptr %dst, i1 %invar.c, i32 %step) mustprog ; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[STEP]], i32 1) ; CHECK-NEXT: [[TMP8:%.*]] = udiv i32 [[TMP7]], [[UMAX]] ; CHECK-NEXT: [[TMP9:%.*]] = add i32 [[TMP6]], [[TMP8]] -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP9]], 2 +; CHECK-NEXT: [[TMP12:%.*]] = add i32 [[INDVAR_LCSSA1]], 2 +; CHECK-NEXT: [[SMAX1:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP12]], i32 0) +; CHECK-NEXT: [[TMP14:%.*]] = add i32 [[TMP3]], -1 +; CHECK-NEXT: [[TMP15:%.*]] = add i32 [[SMAX1]], [[TMP14]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP15]], 2 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] ; CHECK: [[VECTOR_SCEVCHECK]]: ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[STEP]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP9]], 2 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP9]], [[N_MOD_VF]] +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP15]], 2 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP15]], [[N_MOD_VF]] ; CHECK-NEXT: [[TMP10:%.*]] = add i32 [[IV_1_LCSSA]], [[N_VEC]] ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: @@ -239,7 +243,7 @@ define void @expand_diff_scev_unknown(ptr %dst, i1 %invar.c, i32 %step) mustprog ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label 
%[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP9]], [[N_VEC]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP15]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ [[IV_1_LCSSA]], %[[LOOP_2_PREHEADER]] ], [ [[IV_1_LCSSA]], %[[VECTOR_SCEVCHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll b/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll index 0b86a2280b529..027dcaf771072 100644 --- a/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll +++ b/llvm/test/Transforms/LoopVectorize/version-stride-with-integer-casts.ll @@ -22,13 +22,11 @@ define void @test_versioned_with_sext_use(i32 %offset, ptr %dst) { ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], 200 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[INDEX]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP3]] ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -94,13 +92,11 @@ define void @test_versioned_with_zext_use(i32 %offset, ptr %dst) { ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label 
[[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], 200 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[INDEX]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP3]] ; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr [[TMP4]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 @@ -233,13 +229,11 @@ define void @test_versioned_with_different_uses(i32 %offset, ptr noalias %dst.1, ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[OFFSET]], 1 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[TMP0:%.*]] = mul i64 200, [[OFFSET_EXT]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[IV_1]], 200 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[INDEX]], [[OFFSET_EXT]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IV_1]], [[INDEX]] ; CHECK-NEXT: [[OFFSET_IDX2:%.*]] = trunc i64 [[INDEX]] to i32 ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[OFFSET_IDX2]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[OFFSET_IDX2]], 1 @@ -414,26 +408,20 @@ define void @zext_of_i1_stride(i1 %g, ptr %dst) mustprogress { ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i1 [[G]], true ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; 
CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 4 -; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]] -; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], [[G_64]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], [[G_64]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[INDEX]] ; CHECK-NEXT: store <4 x i16> splat (i16 1), ptr [[TMP4]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br label [[EXIT:%.*]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_SCEVCHECK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ] ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]] ; CHECK-NEXT: store i16 [[G_16]], ptr [[GEP]], align 2 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], [[G_64]] diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll index 
b056f44a6c469..8d20a3ba8ed08 100644 --- a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll @@ -14,16 +14,9 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK-NEXT: [[SUB:%.*]] = add i32 [[XA]], -1 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[SUB]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[XB]] to i64 -; CHECK-NEXT: [[TMP2:%.*]] = add nsw i64 [[TMP1]], [[TMP0]] -; CHECK-NEXT: [[SMAX7:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP2]], i64 32000) -; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 32000 -; CHECK-NEXT: [[UMIN8:%.*]] = zext i1 [[TMP3]] to i64 -; CHECK-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP2]], [[UMIN8]] -; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[SMAX7]], [[TMP4]] -; CHECK-NEXT: [[UMAX9:%.*]] = tail call i64 @llvm.umax.i64(i64 [[TMP1]], i64 1) -; CHECK-NEXT: [[TMP6:%.*]] = udiv i64 [[TMP5]], [[UMAX9]] -; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], [[UMIN8]] -; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 1 +; CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP0]], i64 31999) +; CHECK-NEXT: [[SMAX10:%.*]] = add nuw nsw i64 [[TMP2]], 1 +; CHECK-NEXT: [[TMP8:%.*]] = sub i64 [[SMAX10]], [[TMP0]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP8]], 23 ; CHECK-NEXT: [[IDENT_CHECK_NOT:%.*]] = icmp eq i32 [[XB]], 1 ; CHECK-NEXT: [[OR_COND:%.*]] = and i1 [[MIN_ITERS_CHECK]], [[IDENT_CHECK_NOT]] @@ -50,13 +43,11 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY_PREHEADER13]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP8]], -8 -; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[N_VEC]], [[TMP1]] -; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP18]], [[TMP0]] +; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[N_VEC]], [[TMP0]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; 
CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[INDEX]], [[TMP1]] -; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP19]], [[TMP0]] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[INDEX]], [[TMP0]] ; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP20]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP20]], align 4, !alias.scope [[META0:![0-9]+]] @@ -75,7 +66,7 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END]], label [[FOR_BODY_PREHEADER13]] -; CHECK: for.body.preheader13: +; CHECK: for.body.preheader14: ; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ [[TMP0]], [[VECTOR_MEMCHECK]] ], [ [[TMP0]], [[FOR_BODY_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: diff --git a/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll b/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll index fe2e897125eb8..39703e9b53b6b 100644 --- a/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch-to-select-two-case.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals ; RUN: opt < %s -passes=simplifycfg -simplifycfg-require-and-preserve-domtree=1 -S | FileCheck %s ; int foo1_with_default(int a) { @@ -11,20 +11,20 @@ ; return 4; ; } -define i32 @foo1_with_default(i32 %a) { +define i32 @foo1_with_default(i32 %a) !prof !0 { ; CHECK-LABEL: @foo1_with_default( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i32 [[A:%.*]], 20 -; 
CHECK-NEXT: [[SWITCH_SELECT:%.*]] = select i1 [[SWITCH_SELECTCMP]], i32 2, i32 4 +; CHECK-NEXT: [[SWITCH_SELECT:%.*]] = select i1 [[SWITCH_SELECTCMP]], i32 2, i32 4, !prof [[PROF1:![0-9]+]] ; CHECK-NEXT: [[SWITCH_SELECTCMP1:%.*]] = icmp eq i32 [[A]], 10 -; CHECK-NEXT: [[SWITCH_SELECT2:%.*]] = select i1 [[SWITCH_SELECTCMP1]], i32 10, i32 [[SWITCH_SELECT]] +; CHECK-NEXT: [[SWITCH_SELECT2:%.*]] = select i1 [[SWITCH_SELECTCMP1]], i32 10, i32 [[SWITCH_SELECT]], !prof [[PROF2:![0-9]+]] ; CHECK-NEXT: ret i32 [[SWITCH_SELECT2]] ; entry: switch i32 %a, label %sw.epilog [ i32 10, label %sw.bb i32 20, label %sw.bb1 - ] + ], !prof !1 sw.bb: br label %return @@ -41,20 +41,20 @@ return: } ; Same as above, but both cases have the same value. -define i32 @same_value(i32 %a) { +define i32 @same_value(i32 %a) !prof !0 { ; CHECK-LABEL: @same_value( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[SWITCH_SELECTCMP_CASE1:%.*]] = icmp eq i32 [[A:%.*]], 10 ; CHECK-NEXT: [[SWITCH_SELECTCMP_CASE2:%.*]] = icmp eq i32 [[A]], 20 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = or i1 [[SWITCH_SELECTCMP_CASE1]], [[SWITCH_SELECTCMP_CASE2]] -; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i32 10, i32 4 +; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i32 10, i32 4, !prof [[PROF3:![0-9]+]] ; CHECK-NEXT: ret i32 [[TMP0]] ; entry: switch i32 %a, label %sw.epilog [ i32 10, label %sw.bb i32 20, label %sw.bb - ] + ], !prof !1 sw.bb: br label %return @@ -67,17 +67,17 @@ return: ret i32 %retval.0 } -define i1 @switch_to_select_same2_case_results_different_default(i8 %0) { +define i1 @switch_to_select_same2_case_results_different_default(i8 %0) !prof !0 { ; CHECK-LABEL: @switch_to_select_same2_case_results_different_default( ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i8 [[TMP0:%.*]], -5 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i8 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, 
i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %0, label %2 [ i8 4, label %3 i8 0, label %3 - ] + ], !prof !1 2: br label %3 @@ -87,18 +87,18 @@ define i1 @switch_to_select_same2_case_results_different_default(i8 %0) { ret i1 %4 } -define i1 @switch_to_select_same2_case_results_different_default_and_positive_offset_for_case(i8 %0) { +define i1 @switch_to_select_same2_case_results_different_default_and_positive_offset_for_case(i8 %0) !prof !0 { ; CHECK-LABEL: @switch_to_select_same2_case_results_different_default_and_positive_offset_for_case( ; CHECK-NEXT: [[TMP2:%.*]] = sub i8 [[TMP0:%.*]], 43 ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i8 [[TMP2]], -3 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i8 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP3]] ; switch i8 %0, label %2 [ i8 43, label %3 i8 45, label %3 - ] + ], !prof !1 2: br label %3 @@ -108,20 +108,20 @@ define i1 @switch_to_select_same2_case_results_different_default_and_positive_of ret i1 %4 } -define i8 @switch_to_select_same2_case_results_different_default_and_negative_offset_for_case(i32 %i) { +define i8 @switch_to_select_same2_case_results_different_default_and_negative_offset_for_case(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_same2_case_results_different_default_and_negative_offset_for_case( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = sub i32 [[I:%.*]], -5 ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i32 [[TMP0]], -3 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i32 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i8 3, i8 42 +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i8 3, i8 42, !prof [[PROF3]] ; CHECK-NEXT: ret i8 [[TMP1]] ; entry: switch i32 %i, label %default [ i32 -3, label %end i32 -5, label %end - ] + ], !prof !1 default: br label %end @@ 
-131,12 +131,12 @@ end: ret i8 %t0 } -define i1 @switch_to_select_same4_case_results_different_default(i32 %i) { +define i1 @switch_to_select_same4_case_results_different_default(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_same4_case_results_different_default( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i32 [[I:%.*]], -7 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i32 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF4:![0-9]+]] ; CHECK-NEXT: ret i1 [[TMP0]] ; entry: @@ -145,7 +145,7 @@ entry: i32 2, label %lor.end i32 4, label %lor.end i32 6, label %lor.end - ] + ], !prof !2 lor.rhs: br label %lor.end @@ -155,12 +155,12 @@ lor.end: ret i1 %0 } -define i1 @switch_to_select_same4_case_results_different_default_alt_bitmask(i32 %i) { +define i1 @switch_to_select_same4_case_results_different_default_alt_bitmask(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_same4_case_results_different_default_alt_bitmask( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i32 [[I:%.*]], -11 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i32 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP0]] ; entry: @@ -169,7 +169,7 @@ entry: i32 2, label %lor.end i32 8, label %lor.end i32 10, label %lor.end - ] + ], !prof !2 lor.rhs: br label %lor.end @@ -179,13 +179,13 @@ lor.end: ret i1 %0 } -define i1 @switch_to_select_same4_case_results_different_default_positive_offset(i32 %i) { +define i1 @switch_to_select_same4_case_results_different_default_positive_offset(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_same4_case_results_different_default_positive_offset( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = sub i32 [[I:%.*]], 2 ; 
CHECK-NEXT: [[SWITCH_AND:%.*]] = and i32 [[TMP0]], -11 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i32 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP1]] ; entry: @@ -194,7 +194,7 @@ entry: i32 4, label %lor.end i32 10, label %lor.end i32 12, label %lor.end - ] + ], !prof !2 lor.rhs: br label %lor.end @@ -204,7 +204,7 @@ lor.end: ret i1 %0 } -define i1 @switch_to_select_invalid_mask(i32 %i) { +define i1 @switch_to_select_invalid_mask(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_invalid_mask( ; CHECK-NEXT: entry: ; CHECK-NEXT: switch i32 [[I:%.*]], label [[LOR_RHS:%.*]] [ @@ -212,7 +212,7 @@ define i1 @switch_to_select_invalid_mask(i32 %i) { ; CHECK-NEXT: i32 4, label [[LOR_END]] ; CHECK-NEXT: i32 10, label [[LOR_END]] ; CHECK-NEXT: i32 12, label [[LOR_END]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF5:![0-9]+]] ; CHECK: lor.rhs: ; CHECK-NEXT: br label [[LOR_END]] ; CHECK: lor.end: @@ -225,7 +225,7 @@ entry: i32 4, label %lor.end i32 10, label %lor.end i32 12, label %lor.end - ] + ], !prof !2 lor.rhs: br label %lor.end @@ -235,14 +235,14 @@ lor.end: ret i1 %0 } -define i1 @switch_to_select_nonpow2_cases(i32 %i) { +define i1 @switch_to_select_nonpow2_cases(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_nonpow2_cases( ; CHECK-NEXT: entry: ; CHECK-NEXT: switch i32 [[I:%.*]], label [[LOR_RHS:%.*]] [ ; CHECK-NEXT: i32 0, label [[LOR_END:%.*]] ; CHECK-NEXT: i32 2, label [[LOR_END]] ; CHECK-NEXT: i32 4, label [[LOR_END]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF6:![0-9]+]] ; CHECK: lor.rhs: ; CHECK-NEXT: br label [[LOR_END]] ; CHECK: lor.end: @@ -254,7 +254,7 @@ entry: i32 0, label %lor.end i32 2, label %lor.end i32 4, label %lor.end - ] + ], !prof !3 lor.rhs: br label %lor.end @@ -265,7 +265,7 @@ lor.end: } ; TODO: we can produce the optimal code when there is no default 
also -define i8 @switch_to_select_two_case_results_no_default(i32 %i) { +define i8 @switch_to_select_two_case_results_no_default(i32 %i) !prof !0 { ; CHECK-LABEL: @switch_to_select_two_case_results_no_default( ; CHECK-NEXT: entry: ; CHECK-NEXT: switch i32 [[I:%.*]], label [[DEFAULT:%.*]] [ @@ -273,7 +273,7 @@ define i8 @switch_to_select_two_case_results_no_default(i32 %i) { ; CHECK-NEXT: i32 2, label [[END]] ; CHECK-NEXT: i32 4, label [[CASE3:%.*]] ; CHECK-NEXT: i32 6, label [[CASE3]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF5]] ; CHECK: case3: ; CHECK-NEXT: br label [[END]] ; CHECK: default: @@ -288,7 +288,7 @@ entry: i32 2, label %case2 i32 4, label %case3 i32 6, label %case4 - ] + ], !prof !2 case1: br label %end @@ -310,12 +310,12 @@ end: ret i8 %t0 } -define i1 @no_range(i8 %f) { +define i1 @no_range(i8 %f) !prof !0 { ; CHECK-LABEL: @no_range( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 60 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 60 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF7:![0-9]+]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -335,7 +335,7 @@ define i1 @no_range(i8 %f) { i8 253, label %bb2 i8 254, label %bb2 i8 255, label %bb2 - ] + ], !prof !4 bb1: br label %bb3 bb2: @@ -345,7 +345,7 @@ bb3: ret i1 %phi } -define i1 @negative_no_range(i8 %f) { +define i1 @negative_no_range(i8 %f) !prof !0 { ; CHECK-LABEL: @negative_no_range( ; CHECK-NEXT: switch i8 [[F:%.*]], label [[BB3:%.*]] [ ; CHECK-NEXT: i8 52, label [[BB2:%.*]] @@ -364,12 +364,12 @@ define i1 @negative_no_range(i8 %f) { ; CHECK-NEXT: i8 -3, label [[BB2]] ; CHECK-NEXT: i8 -2, label [[BB2]] ; CHECK-NEXT: i8 -1, label [[BB2]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF8:![0-9]+]] ; CHECK: bb2: ; CHECK-NEXT: br label [[BB3]] ; CHECK: bb3: -; CHECK-NEXT: [[_0_SROA_0_0:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] -; CHECK-NEXT: 
ret i1 [[_0_SROA_0_0]] +; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] +; CHECK-NEXT: ret i1 [[PHI]] ; switch i8 %f, label %bb1 [ i8 52, label %bb2 @@ -388,7 +388,7 @@ define i1 @negative_no_range(i8 %f) { i8 253, label %bb2 i8 254, label %bb2 i8 255, label %bb2 - ] + ], !prof !4 bb1: br label %bb3 bb2: @@ -400,18 +400,19 @@ bb3: ; Using ranges. -define i1 @range0to4odd(i8 range(i8 0, 4) %f) { +define i1 @range0to4odd(i8 range(i8 0, 4) %f) !prof !0 { ; CHECK-LABEL: @range0to4odd( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ i8 1, label %bb2 i8 3, label %bb2 - ] + ], !prof !1 + bb1: br label %bb3 bb2: @@ -421,18 +422,18 @@ bb3: ret i1 %phi } -define i1 @range1to4odd(i8 range(i8 1, 4) %f) { +define i1 @range1to4odd(i8 range(i8 1, 4) %f) !prof !0 { ; CHECK-LABEL: @range1to4odd( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ i8 1, label %bb2 i8 3, label %bb2 - ] + ], !prof !1 bb1: br label %bb3 bb2: @@ -442,12 +443,12 @@ bb3: ret i1 %phi } -define i1 @range0to8odd(i8 range(i8 0, 8) %f) { +define i1 @range0to8odd(i8 range(i8 0, 8) %f) !prof !0 { ; CHECK-LABEL: @range0to8odd( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP2]] ; 
switch i8 %f, label %bb1 [ @@ -455,7 +456,7 @@ define i1 @range0to8odd(i8 range(i8 0, 8) %f) { i8 3, label %bb2 i8 5, label %bb2 i8 7, label %bb2 - ] + ], !prof !2 bb1: br label %bb3 bb2: @@ -465,12 +466,12 @@ bb3: ret i1 %phi } -define i1 @range0to8most_significant_bit(i8 range(i8 0, 8) %f) { +define i1 @range0to8most_significant_bit(i8 range(i8 0, 8) %f) !prof !0 { ; CHECK-LABEL: @range0to8most_significant_bit( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 4 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 4 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -478,7 +479,7 @@ define i1 @range0to8most_significant_bit(i8 range(i8 0, 8) %f) { i8 5, label %bb2 i8 6, label %bb2 i8 7, label %bb2 - ] + ], !prof !2 bb1: br label %bb3 bb2: @@ -488,12 +489,12 @@ bb3: ret i1 %phi } -define i1 @range0to15_middle_two_bits(i8 range(i8 0, 16) %f) { +define i1 @range0to15_middle_two_bits(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @range0to15_middle_two_bits( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 6 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 6 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -501,7 +502,8 @@ define i1 @range0to15_middle_two_bits(i8 range(i8 0, 16) %f) { i8 7, label %bb2 i8 14, label %bb2 i8 15, label %bb2 - ] + ], !prof !2 + bb1: br label %bb3 bb2: @@ -511,24 +513,25 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15( ; CHECK-NEXT: switch i8 [[F:%.*]], label [[BB3:%.*]] [ ; CHECK-NEXT: i8 6, label [[BB2:%.*]] ; CHECK-NEXT: i8 7, label [[BB2]] ; 
CHECK-NEXT: i8 14, label [[BB2]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF6]] ; CHECK: bb2: ; CHECK-NEXT: br label [[BB3]] ; CHECK: bb3: -; CHECK-NEXT: [[_0_SROA_0_0:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] -; CHECK-NEXT: ret i1 [[_0_SROA_0_0]] +; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] +; CHECK-NEXT: ret i1 [[PHI]] ; switch i8 %f, label %bb1 [ i8 6, label %bb2 i8 7, label %bb2 i8 14, label %bb2 - ] + ], !prof !3 + bb1: br label %bb3 bb2: @@ -538,19 +541,19 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15_pow_2(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15_pow_2(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15_pow_2( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = sub i8 [[F:%.*]], 6 ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i8 [[TMP0]], -2 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i8 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP1]] ; switch i8 %f, label %bb1 [ i8 6, label %bb2 i8 7, label %bb2 - ] + ], !prof !1 bb1: br label %bb3 bb2: @@ -560,19 +563,19 @@ bb3: ret i1 %phi } -define i1 @negative_range0to5even(i8 range(i8 0, 5) %f) { +define i1 @negative_range0to5even(i8 range(i8 0, 5) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to5even( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = sub i8 [[F:%.*]], 2 ; CHECK-NEXT: [[SWITCH_AND:%.*]] = and i8 [[TMP0]], -3 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = icmp eq i8 [[SWITCH_AND]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP1]] ; switch i8 %f, label %bb1 [ i8 2, label %bb2 i8 4, label %bb2 - ] + ], !prof !1 bb1: br label %bb3 bb2: @@ -582,16 +585,17 @@ bb3: ret i1 %phi } -define i1 
@range0to15_corner_case(i8 range(i8 0, 16) %f) { +define i1 @range0to15_corner_case(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @range0to15_corner_case( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[COND:%.*]] = icmp eq i8 [[F:%.*]], 15 -; CHECK-NEXT: [[DOT:%.*]] = select i1 [[COND]], i1 true, i1 false +; CHECK-NEXT: [[DOT:%.*]] = select i1 [[COND]], i1 true, i1 false, !prof [[PROF9:![0-9]+]] ; CHECK-NEXT: ret i1 [[DOT]] ; switch i8 %f, label %bb1 [ i8 15, label %bb2 - ] + ], !prof !5 + bb1: br label %bb3 bb2: @@ -601,19 +605,19 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15_corner_case(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15_corner_case(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15_corner_case( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[SWITCH_SELECTCMP_CASE1:%.*]] = icmp eq i8 [[F:%.*]], 15 ; CHECK-NEXT: [[SWITCH_SELECTCMP_CASE2:%.*]] = icmp eq i8 [[F]], 8 ; CHECK-NEXT: [[SWITCH_SELECTCMP:%.*]] = or i1 [[SWITCH_SELECTCMP_CASE1]], [[SWITCH_SELECTCMP_CASE2]] -; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false +; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[SWITCH_SELECTCMP]], i1 true, i1 false, !prof [[PROF3]] ; CHECK-NEXT: ret i1 [[TMP0]] ; switch i8 %f, label %bb1 [ i8 15, label %bb2 - i8 8, label %bb2 - ] + i8 8, label %bb2 + ], !prof !1 bb1: br label %bb3 bb2: @@ -626,12 +630,12 @@ bb3: ; Out of range scenarios. Check if the cases, that have a value out of range ; are eliminated and the optimization is performed. 
-define i1 @range0to15_out_of_range_non_prime(i8 range(i8 0, 16) %f) { +define i1 @range0to15_out_of_range_non_prime(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @range0to15_out_of_range_non_prime( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 6 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 6 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -640,7 +644,7 @@ define i1 @range0to15_out_of_range_non_prime(i8 range(i8 0, 16) %f) { i8 14, label %bb2 i8 15, label %bb2 i8 22, label %bb2 - ] + ], !prof !6 bb1: br label %bb3 bb2: @@ -650,12 +654,12 @@ bb3: ret i1 %phi } -define i1 @range0to15_out_of_range_non_prime_more(i8 range(i8 0, 16) %f) { +define i1 @range0to15_out_of_range_non_prime_more(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @range0to15_out_of_range_non_prime_more( ; CHECK-NEXT: bb3: ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[F:%.*]], 6 ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 6 -; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i1 true, i1 false, !prof [[PROF4]] ; CHECK-NEXT: ret i1 [[TMP2]] ; switch i8 %f, label %bb1 [ @@ -665,7 +669,7 @@ define i1 @range0to15_out_of_range_non_prime_more(i8 range(i8 0, 16) %f) { i8 15, label %bb2 i8 22, label %bb2 i8 23, label %bb2 - ] + ], !prof !7 bb1: br label %bb3 bb2: @@ -675,25 +679,25 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15_out_of_range_non_prime(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15_out_of_range_non_prime(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15_out_of_range_non_prime( ; CHECK-NEXT: switch i8 [[F:%.*]], label [[BB3:%.*]] [ ; CHECK-NEXT: i8 6, label [[BB2:%.*]] ; CHECK-NEXT: i8 14, label [[BB2]] ; CHECK-NEXT: i8 15, label [[BB2]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF6]] ; CHECK: bb2: 
; CHECK-NEXT: br label [[BB3]] ; CHECK: bb3: -; CHECK-NEXT: [[TMP2:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] -; CHECK-NEXT: ret i1 [[TMP2]] +; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] +; CHECK-NEXT: ret i1 [[PHI]] ; switch i8 %f, label %bb1 [ i8 6, label %bb2 i8 14, label %bb2 i8 15, label %bb2 i8 23, label %bb2 - ] + ], !prof !2 bb1: br label %bb3 bb2: @@ -703,25 +707,25 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15_out_of_range(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15_out_of_range(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15_out_of_range( ; CHECK-NEXT: switch i8 [[F:%.*]], label [[BB3:%.*]] [ ; CHECK-NEXT: i8 6, label [[BB2:%.*]] ; CHECK-NEXT: i8 7, label [[BB2]] ; CHECK-NEXT: i8 14, label [[BB2]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF6]] ; CHECK: bb2: ; CHECK-NEXT: br label [[BB3]] ; CHECK: bb3: -; CHECK-NEXT: [[_0_SROA_0_0:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] -; CHECK-NEXT: ret i1 [[_0_SROA_0_0]] +; CHECK-NEXT: [[PHI:%.*]] = phi i1 [ true, [[BB2]] ], [ false, [[TMP0:%.*]] ] +; CHECK-NEXT: ret i1 [[PHI]] ; switch i8 %f, label %bb1 [ i8 6, label %bb2 i8 7, label %bb2 i8 14, label %bb2 - i8 150, label %bb2 - ] + i8 -106, label %bb2 + ], !prof !2 bb1: br label %bb3 bb2: @@ -731,7 +735,7 @@ bb3: ret i1 %phi } -define i1 @negative_range0to15_all_out_of_range(i8 range(i8 0, 16) %f) { +define i1 @negative_range0to15_all_out_of_range(i8 range(i8 0, 16) %f) !prof !0 { ; CHECK-LABEL: @negative_range0to15_all_out_of_range( ; CHECK-NEXT: bb1: ; CHECK-NEXT: ret i1 false @@ -741,7 +745,7 @@ define i1 @negative_range0to15_all_out_of_range(i8 range(i8 0, 16) %f) { i8 23, label %bb2 i8 30, label %bb2 i8 31, label %bb2 - ] + ], !prof !2 bb1: br label %bb3 bb2: @@ -750,3 +754,24 @@ bb3: %phi = phi i1 [ false, %bb1 ], [ true, %bb2 ] ret i1 %phi } + +!0 = !{!"function_entry_count", i64 1000} +!1 = !{!"branch_weights", i32 3, i32 5, i32 7} +!2 = 
!{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13} +!3 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11} +!4 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13, i32 17, i32 19, i32 23, i32 29, i32 31, i32 37, i32 41, i32 43, i32 47, i32 53, i32 59, i32 61} +!5 = !{!"branch_weights", i32 3, i32 5} +!6 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13, i32 17} +!7 = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13, i32 17, i32 19} +;. +; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000} +; CHECK: [[PROF1]] = !{!"branch_weights", i32 7, i32 8} +; CHECK: [[PROF2]] = !{!"branch_weights", i32 5, i32 10} +; CHECK: [[PROF3]] = !{!"branch_weights", i32 12, i32 3} +; CHECK: [[PROF4]] = !{!"branch_weights", i32 36, i32 3} +; CHECK: [[PROF5]] = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13} +; CHECK: [[PROF6]] = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11} +; CHECK: [[PROF7]] = !{!"branch_weights", i32 496, i32 3} +; CHECK: [[PROF8]] = !{!"branch_weights", i32 3, i32 5, i32 7, i32 11, i32 13, i32 17, i32 19, i32 23, i32 29, i32 31, i32 37, i32 41, i32 43, i32 47, i32 53, i32 59, i32 61} +; CHECK: [[PROF9]] = !{!"branch_weights", i32 5, i32 3} +;. 
diff --git a/llvm/test/Verifier/preallocated-invalid.ll b/llvm/test/Verifier/preallocated-invalid.ll index 38ed1067c497d..2c5aff231e1bd 100644 --- a/llvm/test/Verifier/preallocated-invalid.ll +++ b/llvm/test/Verifier/preallocated-invalid.ll @@ -65,13 +65,21 @@ define void @preallocated_one_call() { ret void } -; CHECK: must be a constant +; CHECK: immarg operand has non-immediate parameter define void @preallocated_setup_constant() { %ac = call i32 @blackbox() %cs = call token @llvm.call.preallocated.setup(i32 %ac) ret void } +; CHECK: llvm.call.preallocated.alloc arg index must be a constant +define void @preallocated_arg_constant() { + %ac = call i32 @blackbox() + %cs = call token @llvm.call.preallocated.setup(i32 3) + call token @llvm.call.preallocated.arg(token %cs, i32 %ac) + ret void +} + ; CHECK: must be between 0 and corresponding define void @preallocated_setup_arg_index_in_bounds() { %cs = call token @llvm.call.preallocated.setup(i32 2) diff --git a/llvm/test/tools/llvm-lib/sym64-threshold.test b/llvm/test/tools/llvm-lib/sym64-threshold.test new file mode 100644 index 0000000000000..76f0a030274ef --- /dev/null +++ b/llvm/test/tools/llvm-lib/sym64-threshold.test @@ -0,0 +1,71 @@ +# RUN: yaml2obj --docnum=1 %s -o %t01234567890234567789.obj +# RUN: yaml2obj --docnum=2 %s -o %t-ec.obj +# RUN: env SYM64_THRESHOLD=100 llvm-lib -machine:amd64 -out:%t.lib %t01234567890234567789.obj +# RUN: llvm-nm --print-armap %t.lib | FileCheck --check-prefix=ARMAP %s +# ARMAP: Archive map +# ARMAP-NEXT: sym + +# RUN: env SYM64_THRESHOLD=100 not llvm-lib -machine:arm64x -out:%t-ec.lib %t-ec.obj %t01234567890234567789.obj 2>&1 | FileCheck %s +# CHECK: Archive is too large: ARM64X does not support archives larger than 4GB + +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_AMD64 + Characteristics: [ ] +sections: + - Name: .text + Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ] + Alignment: 4 + SectionData: '' +symbols: + - Name: .text + Value: 0 
+ SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 1 + - !Symbol + Name: sym + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL # (0) + ComplexType: IMAGE_SYM_DTYPE_FUNCTION # (2) + StorageClass: IMAGE_SYM_CLASS_EXTERNAL # (2) +... + +--- !COFF +header: + Machine: IMAGE_FILE_MACHINE_ARM64 + Characteristics: [ ] +sections: + - Name: .text + Characteristics: [ IMAGE_SCN_CNT_CODE, IMAGE_SCN_MEM_EXECUTE, IMAGE_SCN_MEM_READ ] + Alignment: 4 + SectionData: '' +symbols: + - Name: .text + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL + ComplexType: IMAGE_SYM_DTYPE_NULL + StorageClass: IMAGE_SYM_CLASS_STATIC + SectionDefinition: + Length: 0 + NumberOfRelocations: 0 + NumberOfLinenumbers: 0 + CheckSum: 0 + Number: 1 + - !Symbol + Name: sym + Value: 0 + SectionNumber: 1 + SimpleType: IMAGE_SYM_TYPE_NULL # (0) + ComplexType: IMAGE_SYM_DTYPE_FUNCTION # (2) + StorageClass: IMAGE_SYM_CLASS_EXTERNAL # (2) +... 
diff --git a/llvm/test/tools/llvm-mca/RISCV/SiFive7/vgather-vcompress.s b/llvm/test/tools/llvm-mca/RISCV/SiFive7/vgather-vcompress.s new file mode 100644 index 0000000000000..4ec1683a886dc --- /dev/null +++ b/llvm/test/tools/llvm-mca/RISCV/SiFive7/vgather-vcompress.s @@ -0,0 +1,314 @@ +# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py +# RUN: llvm-mca -mtriple=riscv64 -mcpu=sifive-x280 -instruction-tables=full -iterations=1 < %s | FileCheck %s + +# The legal (SEW, LMUL) pairs are: +# (e8, mf8) (e8, mf4) (e8, mf2) (e8, m1) (e8, m2) (e8, m4) (e8, m8) +# (e16, mf4) (e16, mf2) (e16, m1) (e16, m2) (e16, m4) (e16, m8) +# (e32, mf2) (e32, m1) (e32, m2) (e32, m4) (e32, m8) +# (e64, m1) (e64, m2) (e64, m4) (e64, m8) + +vsetvli zero, zero, e8, mf8, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e8, mf4, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e8, mf2, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e8, m1, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e8, m2, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e8, m4, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e8, m8, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e16, mf4, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e16, mf2, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e16, m1, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e16, m2, tu, mu +vrgather.vv v8, 
v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e16, m4, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e16, m8, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e32, mf2, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e32, m1, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e32, m2, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e32, m4, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e32, m8, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e64, m1, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e64, m2, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e64, m4, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 +vsetvli zero, zero, e64, m8, tu, mu +vrgather.vv v8, v16, v24 +vrgatherei16.vv v8, v16, v24 +vcompress.vm v8, v16, v24 + +# CHECK: Resources: +# CHECK-NEXT: [0] - VLEN512SiFive7FDiv:1 +# CHECK-NEXT: [1] - VLEN512SiFive7IDiv:1 +# CHECK-NEXT: [2] - VLEN512SiFive7PipeA:1 +# CHECK-NEXT: [3] - VLEN512SiFive7PipeAB:2 VLEN512SiFive7PipeA, VLEN512SiFive7PipeB +# CHECK-NEXT: [4] - VLEN512SiFive7PipeB:1 +# CHECK-NEXT: [5] - VLEN512SiFive7VA:1 +# CHECK-NEXT: [6] - VLEN512SiFive7VCQ:1 +# CHECK-NEXT: [7] - VLEN512SiFive7VL:1 +# CHECK-NEXT: [8] - VLEN512SiFive7VS:1 + +# CHECK: Instruction Info: +# CHECK-NEXT: [1]: #uOps +# CHECK-NEXT: [2]: Latency +# CHECK-NEXT: [3]: RThroughput +# CHECK-NEXT: [4]: MayLoad +# CHECK-NEXT: [5]: MayStore 
+# CHECK-NEXT: [6]: HasSideEffects (U) +# CHECK-NEXT: [7]: Bypass Latency +# CHECK-NEXT: [8]: Resources ( | [] | [, %t -RUN: llvm-strings %t - | FileCheck %s --check-prefix=CASE1 --implicit-check-not={{.}} +RUN: echo -n "abcdefg" | llvm-strings - | FileCheck %s --check-prefix=CASE1 --implicit-check-not={{.}} CASE1: abcdefg ## Case 2: output too short for string. -RUN: echo -n "abc" > %t -RUN: llvm-strings %t - | FileCheck %s --implicit-check-not={{.}} --allow-empty +RUN: echo -n "abc" | llvm-strings - | FileCheck %s --implicit-check-not={{.}} --allow-empty ## Case 3: output with new line. RUN: printf "abcd\nefgh" | llvm-strings - | FileCheck %s --check-prefix=CASE3 --implicit-check-not={{.}} @@ -21,5 +21,4 @@ CASE4-NEXT: ghij CASE4-NEXT: klmn ## Case 5: no file name specified is equivalent to explicitly requesting stdin. -RUN: echo -n "abcdefg" > %t -RUN: llvm-strings %t - | FileCheck %s --check-prefix=CASE1 --implicit-check-not={{.}} +RUN: echo -n "abcdefg" | llvm-strings | FileCheck %s --check-prefix=CASE1 --implicit-check-not={{.}} diff --git a/llvm/test/tools/llvm-tli-checker/ifuncs.yaml b/llvm/test/tools/llvm-tli-checker/ifuncs.yaml new file mode 100644 index 0000000000000..4eae66c3051a7 --- /dev/null +++ b/llvm/test/tools/llvm-tli-checker/ifuncs.yaml @@ -0,0 +1,39 @@ +# REQUIRES: x86-registered-target +# +# stpncpy is declared as available in TargetLibraryInfo for FreeBSD, but +# llvm-tli-checker won't be able to find it unless it knows how to check ifuncs. +# This test makes sure that llvm-tli-checker supports processing ifuncs. 
+# +# RUN: yaml2obj %s -o=%t1 +# RUN: llvm-tli-checker --triple=x86_64-unknown-freebsd %t1 | FileCheck %s +# +# CHECK: == Total TLI yes SDK yes: 1 +# + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + OSABI: ELFOSABI_FREEBSD + Type: ET_DYN + Machine: EM_X86_64 +Sections: + - Name: .text + Type: SHT_PROGBITS + - Name: .rela.plt + Type: SHT_RELA + Flags: [ SHF_ALLOC, SHF_INFO_LINK ] + Address: 0x3CA20 + Link: .dynsym + AddressAlign: 0x8 + Relocations: + - Offset: 0x1E2C68 + Symbol: stpncpy + Type: R_X86_64_JUMP_SLOT +DynamicSymbols: + - Name: stpncpy + Type: STT_GNU_IFUNC + Section: .text + Binding: STB_WEAK + Value: 0x15D5E0 + Size: 0xC diff --git a/llvm/tools/dsymutil/Reproducer.cpp b/llvm/tools/dsymutil/Reproducer.cpp index 31e49cdd0518c..0c1d3f90af299 100644 --- a/llvm/tools/dsymutil/Reproducer.cpp +++ b/llvm/tools/dsymutil/Reproducer.cpp @@ -37,9 +37,10 @@ ReproducerGenerate::ReproducerGenerate(std::error_code &EC, int Argc, char **Argv, bool GenerateOnExit) : Root(createReproducerDir(EC)), GenerateOnExit(GenerateOnExit) { llvm::append_range(Args, ArrayRef(Argv, Argc)); + auto RealFS = vfs::getRealFileSystem(); if (!Root.empty()) - FC = std::make_shared(Root, Root); - VFS = FileCollector::createCollectorVFS(vfs::getRealFileSystem(), FC); + FC = std::make_shared(Root, Root, RealFS); + VFS = FileCollector::createCollectorVFS(std::move(RealFS), FC); } ReproducerGenerate::~ReproducerGenerate() { diff --git a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp index 31bf6a9d2d9c8..e09ddb45da6e9 100644 --- a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp +++ b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp @@ -1519,10 +1519,10 @@ class MemoryMatcher { static StringRef detectStubKind(const Session::MemoryRegionInfo &Stub) { using namespace support::endian; - auto Armv7MovWTle = byte_swap(0xe300c000); - auto Armv7BxR12le = byte_swap(0xe12fff1c); - auto Thumbv7MovWTle = byte_swap(0x0c00f240); - auto Thumbv7BxR12le = byte_swap(0x4760); 
+ auto Armv7MovWTle = byte_swap(0xe300c000, endianness::little); + auto Armv7BxR12le = byte_swap(0xe12fff1c, endianness::little); + auto Thumbv7MovWTle = byte_swap(0x0c00f240, endianness::little); + auto Thumbv7BxR12le = byte_swap(0x4760, endianness::little); MemoryMatcher M(Stub.getContent()); if (M.matchMask(Thumbv7MovWTle)) { diff --git a/llvm/tools/llvm-offload-wrapper/llvm-offload-wrapper.cpp b/llvm/tools/llvm-offload-wrapper/llvm-offload-wrapper.cpp index 9dac1646b1e26..d65b402571ae8 100644 --- a/llvm/tools/llvm-offload-wrapper/llvm-offload-wrapper.cpp +++ b/llvm/tools/llvm-offload-wrapper/llvm-offload-wrapper.cpp @@ -84,6 +84,10 @@ static Error wrapImages(ArrayRef> BuffersToWrap) { M, BuffersToWrap.front(), offloading::getOffloadEntryArray(M))) return Err; break; + case llvm::object::OFK_SYCL: + if (Error Err = offloading::wrapSYCLBinaries(M, BuffersToWrap.front())) + return Err; + break; default: return createStringError(getOffloadKindName(Kind) + " wrapping is not supported"); diff --git a/llvm/tools/llvm-tli-checker/llvm-tli-checker.cpp b/llvm/tools/llvm-tli-checker/llvm-tli-checker.cpp index 3cd5d597ee133..0cf8c5c63bef2 100644 --- a/llvm/tools/llvm-tli-checker/llvm-tli-checker.cpp +++ b/llvm/tools/llvm-tli-checker/llvm-tli-checker.cpp @@ -153,8 +153,12 @@ void SDKNameMap::maybeInsertSymbol(const SymbolRef &S, const ObjectFile &O) { uint32_t Flags = unwrapIgnoreError(S.getFlags()); section_iterator Section = unwrapIgnoreError(S.getSection(), /*Default=*/O.section_end()); - if (Type == SymbolRef::ST_Function && (Flags & SymbolRef::SF_Global) && - Section != O.section_end()) { + bool IsRegularFunction = Type == SymbolRef::ST_Function && + (Flags & SymbolRef::SF_Global) && + Section != O.section_end(); + bool IsIFunc = + Type == SymbolRef::ST_Other && (Flags & SymbolRef::SF_Indirect); + if (IsRegularFunction || IsIFunc) { StringRef Name = unwrapIgnoreError(S.getName()); insert({ Name, true }); } diff --git a/llvm/unittests/ADT/APIntTest.cpp 
b/llvm/unittests/ADT/APIntTest.cpp index 116693c873f30..ca9f9f17ee112 100644 --- a/llvm/unittests/ADT/APIntTest.cpp +++ b/llvm/unittests/ADT/APIntTest.cpp @@ -3718,8 +3718,9 @@ TEST(APIntTest, ScaleBitMask) { TEST(APIntTest, DenseMap) { DenseMap Map; APInt ZeroWidthInt(0, 0, false); - Map.insert({ZeroWidthInt, 0}); - Map.find(ZeroWidthInt); + Map.insert({ZeroWidthInt, 123}); + auto It = Map.find(ZeroWidthInt); + EXPECT_EQ(It->second, 123); } TEST(APIntTest, TryExt) { diff --git a/llvm/unittests/ADT/PackedVectorTest.cpp b/llvm/unittests/ADT/PackedVectorTest.cpp index 30fc7c0b6d07f..df2cbf0e7f0f8 100644 --- a/llvm/unittests/ADT/PackedVectorTest.cpp +++ b/llvm/unittests/ADT/PackedVectorTest.cpp @@ -71,6 +71,14 @@ TEST(PackedVectorTest, RawBitsSize) { EXPECT_EQ(12u, Vec.raw_bits().size()); } +TEST(PackedVectorTest, SignedValueOverwrite) { + PackedVector Vec(1); + Vec[0] = -1; + EXPECT_EQ(-1, Vec[0]); + Vec[0] = 1; + EXPECT_EQ(1, Vec[0]); +} + #ifdef EXPECT_DEBUG_DEATH TEST(PackedVectorTest, UnsignedValues) { diff --git a/llvm/unittests/CodeGen/TypeTraitsTest.cpp b/llvm/unittests/CodeGen/TypeTraitsTest.cpp index dde86280cff6a..1c8852fc1f071 100644 --- a/llvm/unittests/CodeGen/TypeTraitsTest.cpp +++ b/llvm/unittests/CodeGen/TypeTraitsTest.cpp @@ -6,13 +6,16 @@ // //===----------------------------------------------------------------------===// +#include "llvm/CodeGen/RDFRegisters.h" #include "llvm/CodeGen/RegisterPressure.h" #include "llvm/CodeGen/ScheduleDAG.h" #include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/SlotIndexes.h" #include "llvm/CodeGen/TargetPassConfig.h" #include "gtest/gtest.h" +#include #include +#include using namespace llvm; @@ -23,3 +26,35 @@ static_assert(std::is_trivially_copyable_v, "trivially copyable"); static_assert(std::is_trivially_copyable_v, "trivially copyable"); static_assert(std::is_trivially_copyable_v, "trivially copyable"); + +// https://llvm.org/PR105169 +// Verify that we won't accidently specialize std::less and 
std::equal_to in a +// wrong way. +// C++17 [namespace.std]/2, C++20/23 [namespace.std]/5: +// A program may explicitly instantiate a template defined in the standard +// library only if the declaration +// - depends on the name of a user-defined type and +// - the instantiation meets the standard library requirements for the +// original template. +template constexpr bool CheckStdCmpRequirements() { + // std::less and std::equal_to are literal, default constructible, and + // copyable classes. + Fn f1; + auto f2 = f1; + auto f3 = std::move(f2); + f2 = f3; + f2 = std::move(f3); + + // Properties held on all known implementations, although not guaranteed by + // the standard. + static_assert(std::is_empty_v); + static_assert(std::is_trivially_default_constructible_v); + static_assert(std::is_trivially_copyable_v); + + return true; +} + +static_assert(CheckStdCmpRequirements>(), + "same as the original template"); +static_assert(CheckStdCmpRequirements>(), + "same as the original template"); diff --git a/llvm/unittests/MC/StringTableBuilderTest.cpp b/llvm/unittests/MC/StringTableBuilderTest.cpp index 05f469a229bf9..44a985be6cfcb 100644 --- a/llvm/unittests/MC/StringTableBuilderTest.cpp +++ b/llvm/unittests/MC/StringTableBuilderTest.cpp @@ -58,8 +58,8 @@ TEST(StringTableBuilderTest, BasicWinCOFF) { std::string Expected; - ExpectedSize = support::endian::byte_swap( - ExpectedSize); + ExpectedSize = support::endian::byte_swap(ExpectedSize, + llvm::endianness::little); Expected.append((const char*)&ExpectedSize, 4); Expected += "pygmy hippopotamus"; Expected += '\x00'; diff --git a/llvm/unittests/Object/DXContainerTest.cpp b/llvm/unittests/Object/DXContainerTest.cpp index 396d060a75bfd..d6f7b26b99cd7 100644 --- a/llvm/unittests/Object/DXContainerTest.cpp +++ b/llvm/unittests/Object/DXContainerTest.cpp @@ -1200,4 +1200,52 @@ TEST(RootSignature, ParseStaticSamplers) { ASSERT_EQ(Sampler.RegisterSpace, 32u); ASSERT_EQ(Sampler.ShaderVisibility, 7u); } + { + // this is testing 
static sampler parsing for root signature version 1.2, + // it changes: the version number, the size of root signature being emitted + // and the values for flag fields. + uint8_t Buffer[] = { + 0x44, 0x58, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x90, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x54, 0x53, 0x30, 0x4c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x18, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, + 0xa4, 0x70, 0x9d, 0x3f, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x85, 0xeb, 0x91, 0x40, 0x66, 0x66, 0x0e, 0x41, + 0x1f, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00}; + DXContainer C = + llvm::cantFail(DXContainer::create(getMemoryBuffer<148>(Buffer))); + + auto MaybeRS = C.getRootSignature(); + ASSERT_TRUE(MaybeRS.has_value()); + const auto &RS = MaybeRS.value(); + ASSERT_EQ(RS.getVersion(), 3U); + ASSERT_EQ(RS.getNumParameters(), 0U); + ASSERT_EQ(RS.getRootParametersOffset(), 0U); + ASSERT_EQ(RS.getNumStaticSamplers(), 1U); + ASSERT_EQ(RS.getStaticSamplersOffset(), 24U); + ASSERT_EQ(RS.getFlags(), 17U); + + auto Sampler = *RS.samplers().begin(); + + ASSERT_EQ(Sampler.Filter, 10U); + ASSERT_EQ(Sampler.AddressU, 1U); + ASSERT_EQ(Sampler.AddressV, 2U); + ASSERT_EQ(Sampler.AddressW, 5U); + ASSERT_FLOAT_EQ(Sampler.MipLODBias, 1.23F); + ASSERT_EQ(Sampler.MaxAnisotropy, 20U); + ASSERT_EQ(Sampler.ComparisonFunc, 4U); + ASSERT_EQ(Sampler.BorderColor, 0U); + ASSERT_FLOAT_EQ(Sampler.MinLOD, 4.56F); + ASSERT_FLOAT_EQ(Sampler.MaxLOD, 8.9F); + 
ASSERT_EQ(Sampler.ShaderRegister, 31U); + ASSERT_EQ(Sampler.RegisterSpace, 32U); + ASSERT_EQ(Sampler.ShaderVisibility, 7U); + ASSERT_EQ(Sampler.Flags, 1U); + } } diff --git a/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp b/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp index b0ad208625436..1b21fe01dfca9 100644 --- a/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp +++ b/llvm/unittests/ObjectYAML/DXContainerYAMLTest.cpp @@ -526,3 +526,54 @@ TEST(RootSignature, ParseStaticSamplers) { EXPECT_EQ(Storage.size(), 144u); EXPECT_TRUE(memcmp(Buffer, Storage.data(), 144u) == 0); } + +TEST(RootSignature, ParseStaticSamplersV13) { + SmallString<128> Storage; + + // First read a fully explicit yaml with all sizes and offsets provided + ASSERT_TRUE(convert(Storage, R"(--- !dxcontainer +Header: + Hash: [ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ] + Version: + Major: 1 + Minor: 0 + PartCount: 1 + PartOffsets: [ 60 ] +Parts: + - Name: RTS0 + Size: 76 + RootSignature: + Version: 3 + NumRootParameters: 0 + RootParametersOffset: 24 + NumStaticSamplers: 1 + StaticSamplersOffset: 24 + Parameters: [] + Samplers: + - ShaderRegister: 31 + RegisterSpace: 32 + ShaderVisibility: All + SAMPLER_FLAG_UINT_BORDER_COLOR: true + AllowInputAssemblerInputLayout: true + DenyGeometryShaderRootAccess: true + )")); + + uint8_t Buffer[] = { + 0x44, 0x58, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x90, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x54, 0x53, 0x30, 0x4c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x18, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0x7f, + 0x1f, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00}; + + EXPECT_EQ(Storage.size(), 148U); + EXPECT_TRUE(memcmp(Buffer, Storage.data(), 148U) == 0); +} diff --git a/llvm/unittests/Support/EndianTest.cpp b/llvm/unittests/Support/EndianTest.cpp index c48b7707b7751..0ee631db74ac1 100644 --- a/llvm/unittests/Support/EndianTest.cpp +++ b/llvm/unittests/Support/EndianTest.cpp @@ -24,16 +24,15 @@ TEST(Endian, Read) { unsigned char littleval[] = {0x00, 0x04, 0x03, 0x02, 0x01}; int32_t BigAsHost = 0x00010203; EXPECT_EQ(BigAsHost, - (endian::read(bigval))); + (endian::read(bigval, llvm::endianness::big))); int32_t LittleAsHost = 0x02030400; - EXPECT_EQ( - LittleAsHost, - (endian::read(littleval))); + EXPECT_EQ(LittleAsHost, (endian::read( + littleval, llvm::endianness::little))); EXPECT_EQ( - (endian::read(bigval + 1)), - (endian::read(littleval + - 1))); + (endian::read(bigval + 1, llvm::endianness::big)), + (endian::read(littleval + 1, + llvm::endianness::little))); } TEST(Endian, WriteNext) { diff --git a/llvm/unittests/Support/FileCollectorTest.cpp b/llvm/unittests/Support/FileCollectorTest.cpp index 184d0e3fdfd17..0ece86947b4f2 100644 --- a/llvm/unittests/Support/FileCollectorTest.cpp +++ b/llvm/unittests/Support/FileCollectorTest.cpp @@ -43,7 +43,8 @@ class TestingFileCollector : public FileCollector { TEST(FileCollectorTest, addFile) { TempDir root("add_file_root", /*Unique*/ true); std::string root_fs(root.path()); - TestingFileCollector FileCollector(root_fs, root_fs); + TestingFileCollector FileCollector(root_fs, root_fs, + vfs::getRealFileSystem()); FileCollector.addFile("/path/to/a"); FileCollector.addFile("/path/to/b"); @@ -77,7 +78,8 @@ TEST(FileCollectorTest, addDirectory) { TempFile c(ccc.str()); std::string 
root_fs(file_root.path()); - TestingFileCollector FileCollector(root_fs, root_fs); + TestingFileCollector FileCollector(root_fs, root_fs, + vfs::getRealFileSystem()); FileCollector.addDirectory(file_root.path()); @@ -105,7 +107,8 @@ TEST(FileCollectorTest, copyFiles) { // Create file collector and add files. TempDir root("copy_files_root", /*Unique*/ true); std::string root_fs(root.path()); - TestingFileCollector FileCollector(root_fs, root_fs); + TestingFileCollector FileCollector(root_fs, root_fs, + vfs::getRealFileSystem()); FileCollector.addFile(a.path()); FileCollector.addFile(b.path()); FileCollector.addFile(c.path()); @@ -133,7 +136,8 @@ TEST(FileCollectorTest, recordAndConstructDirectory) { // Create file collector and add files. TempDir root("copy_files_root", /*Unique*/ true); std::string root_fs(root.path()); - TestingFileCollector FileCollector(root_fs, root_fs); + TestingFileCollector FileCollector(root_fs, root_fs, + vfs::getRealFileSystem()); FileCollector.addFile(a.path()); // The empty directory isn't seen until we add it. @@ -169,7 +173,8 @@ TEST(FileCollectorTest, recordVFSAccesses) { // Create file collector and add files. TempDir root("copy_files_root", /*Unique*/ true); std::string root_fs(root.path()); - auto Collector = std::make_shared(root_fs, root_fs); + auto Collector = std::make_shared( + root_fs, root_fs, vfs::getRealFileSystem()); auto VFS = FileCollector::createCollectorVFS(vfs::getRealFileSystem(), Collector); VFS->status(a.path()); @@ -216,7 +221,8 @@ TEST(FileCollectorTest, Symlinks) { // Root where files are copied to. TempDir reproducer_root("reproducer_root", /*Unique*/ true); std::string root_fs(reproducer_root.path()); - TestingFileCollector FileCollector(root_fs, root_fs); + TestingFileCollector FileCollector(root_fs, root_fs, + vfs::getRealFileSystem()); // Add all the files to the collector. 
FileCollector.addFile(a.path()); @@ -264,7 +270,8 @@ TEST(FileCollectorTest, recordVFSSymlinkAccesses) { // Create file collector and add files. TempDir root("copy_files_root", true); std::string root_fs(root.path()); - auto Collector = std::make_shared(root_fs, root_fs); + auto Collector = std::make_shared( + root_fs, root_fs, vfs::getRealFileSystem()); auto VFS = FileCollector::createCollectorVFS(vfs::getRealFileSystem(), Collector); SmallString<256> Output; diff --git a/llvm/utils/FileCheck/FileCheck.cpp b/llvm/utils/FileCheck/FileCheck.cpp index 185b6b30994fc..305c28b4c7257 100644 --- a/llvm/utils/FileCheck/FileCheck.cpp +++ b/llvm/utils/FileCheck/FileCheck.cpp @@ -384,7 +384,7 @@ BuildInputAnnotations(const SourceMgr &SM, unsigned CheckFileBufferID, std::vector &Annotations, unsigned &LabelWidth) { struct CompareSMLoc { - bool operator()(const SMLoc &LHS, const SMLoc &RHS) const { + bool operator()(SMLoc LHS, SMLoc RHS) const { return LHS.getPointer() < RHS.getPointer(); } }; diff --git a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp index bc42efa3b2e9c..be7537c83da3a 100644 --- a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp +++ b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp @@ -21,6 +21,13 @@ #include using namespace llvm; +// As the type of more than one return values is represented as an anonymous +// struct, which is encoded with `IIT_STRUCT` followed by a byte specifying +// the number of return values, starting from 2 (encoded as 0) to 257 +// (encoded as 255). So, the maximum number of values that an intrinsic can +// return is 257. 
+static constexpr unsigned MaxNumReturn = 257; + //===----------------------------------------------------------------------===// // CodeGenIntrinsic Implementation //===----------------------------------------------------------------------===// @@ -29,15 +36,6 @@ CodeGenIntrinsicContext::CodeGenIntrinsicContext(const RecordKeeper &RC) { for (const Record *Rec : RC.getAllDerivedDefinitions("IntrinsicProperty")) if (Rec->getValueAsBit("IsDefault")) DefaultProperties.push_back(Rec); - - // The maximum number of values that an intrinsic can return is the size of - // of `IIT_RetNumbers` list - 1 (since we index into this list using the - // number of return values as the index). - const auto *IIT_RetNumbers = - dyn_cast_or_null(RC.getGlobal("IIT_RetNumbers")); - if (!IIT_RetNumbers) - PrintFatalError("unable to find 'IIT_RetNumbers' list"); - MaxNumReturn = IIT_RetNumbers->size() - 1; } CodeGenIntrinsicTable::CodeGenIntrinsicTable(const RecordKeeper &RC) { @@ -302,11 +300,10 @@ CodeGenIntrinsic::CodeGenIntrinsic(const Record *R, } unsigned NumRet = R->getValueAsListInit("RetTypes")->size(); - if (NumRet > Ctx.MaxNumReturn) + if (NumRet > MaxNumReturn) PrintFatalError(DefLoc, "intrinsics can only return upto " + - Twine(Ctx.MaxNumReturn) + " values, '" + - DefName + "' returns " + Twine(NumRet) + - " values"); + Twine(MaxNumReturn) + " values, '" + DefName + + "' returns " + Twine(NumRet) + " values"); const Record *TypeInfo = R->getValueAsDef("TypeInfo"); if (!TypeInfo->isSubClassOf("TypeInfoGen")) diff --git a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.h b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.h index 676f575b2749d..2e86149514f46 100644 --- a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.h +++ b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.h @@ -30,9 +30,6 @@ class RecordKeeper; struct CodeGenIntrinsicContext { explicit CodeGenIntrinsicContext(const RecordKeeper &RC); std::vector DefaultProperties; - - // Maximum number of values an intrinsic can return. 
- unsigned MaxNumReturn; }; struct CodeGenIntrinsic { diff --git a/llvm/utils/git/code-format-helper.py b/llvm/utils/git/code-format-helper.py index 7a5311d668f79..6f809c5977c75 100755 --- a/llvm/utils/git/code-format-helper.py +++ b/llvm/utils/git/code-format-helper.py @@ -173,7 +173,8 @@ def run(self, changed_files: List[str], args: FormatArgs) -> bool: f":warning: The {self.friendly_name} failed without printing " "a diff. Check the logs for stderr output. :warning:" ) - self.update_pr(comment_text, args, create_new=False) + if should_update_gh: + self.update_pr(comment_text, args, create_new=False) return False diff --git a/llvm/utils/gn/secondary/clang/lib/Tooling/DependencyScanning/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Tooling/DependencyScanning/BUILD.gn index 739e2fbb35982..6733cf4ef3772 100644 --- a/llvm/utils/gn/secondary/clang/lib/Tooling/DependencyScanning/BUILD.gn +++ b/llvm/utils/gn/secondary/clang/lib/Tooling/DependencyScanning/BUILD.gn @@ -18,6 +18,7 @@ static_library("DependencyScanning") { "//llvm/lib/TargetParser", ] sources = [ + "DependencyScannerImpl.cpp", "DependencyScanningFilesystem.cpp", "DependencyScanningService.cpp", "DependencyScanningTool.cpp", diff --git a/llvm/utils/lit/lit/TestRunner.py b/llvm/utils/lit/lit/TestRunner.py index 9ae8ac75bee08..a7e2705f609af 100644 --- a/llvm/utils/lit/lit/TestRunner.py +++ b/llvm/utils/lit/lit/TestRunner.py @@ -1541,8 +1541,10 @@ def regex_escape(s): return s path_substitutions = [ - ("s", sourcepath), ("S", sourcedir), ("p", sourcedir), - ("t", tmpName), ("T", tmpDir) + ("s", sourcepath), + ("S", sourcedir), + ("p", sourcedir), + ("t", tmpName), ] for path_substitution in path_substitutions: letter = path_substitution[0] @@ -1919,6 +1921,14 @@ def processLine(ln): # seems reasonable. ln = _caching_re_compile(a).sub(str(b), escapePercents(ln)) + # TODO(boomanaiden154): Remove when we branch LLVM 22 so people on the + # release branch will have sufficient time to migrate. 
+ if bool(_caching_re_compile("%T").search(ln)): + raise ValueError( + "%T is no longer supported. Please create directories with names " + "based on %t." + ) + # Strip the trailing newline and any extra whitespace. return ln.strip() diff --git a/llvm/utils/lit/tests/Inputs/shtest-shell/capital-t-error-message.txt b/llvm/utils/lit/tests/Inputs/shtest-shell/capital-t-error-message.txt new file mode 100644 index 0000000000000..e69dfee8fced8 --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/shtest-shell/capital-t-error-message.txt @@ -0,0 +1,2 @@ +# Check that we return a decent error message when someone uses %T +# RUN: echo %T > %t diff --git a/llvm/utils/lit/tests/Inputs/shtest-shell/valid-shell.txt b/llvm/utils/lit/tests/Inputs/shtest-shell/valid-shell.txt index 75ce8b7733ad7..cfa829f0bf2f7 100644 --- a/llvm/utils/lit/tests/Inputs/shtest-shell/valid-shell.txt +++ b/llvm/utils/lit/tests/Inputs/shtest-shell/valid-shell.txt @@ -18,15 +18,15 @@ # Check force remove commands success whether the directory does or doesn't exist. # # Check the mkdir command with -p option. -# RUN: rm -f -r %T/test -# RUN: %{python} %S/../check_path.py dir %T/test > %t.out +# RUN: rm -f -r %t.test +# RUN: %{python} %S/../check_path.py dir %t.test > %t.out # RUN: FileCheck --check-prefix=REMOVE-PARENT-DIR < %t.out %s -# RUN: mkdir -p %T/test -# RUN: %{python} %S/../check_path.py dir %T/test > %t.out +# RUN: mkdir -p %t.test +# RUN: %{python} %S/../check_path.py dir %t.test > %t.out # RUN: FileCheck --check-prefix=MAKE-PARENT-DIR < %t.out %s -# RUN: rm -f %T/test || true -# RUN: rm -f -r %T/test -# RUN: %{python} %S/../check_path.py dir %T/test > %t.out +# RUN: rm -f %t.test || true +# RUN: rm -f -r %t.test +# RUN: %{python} %S/../check_path.py dir %t.test > %t.out # RUN: FileCheck --check-prefix=REMOVE-PARENT-DIR < %t.out %s # # MAKE-PARENT-DIR: True @@ -34,15 +34,15 @@ # # Check the mkdir command without -p option. 
# -# RUN: rm -rf %T/test1 -# RUN: mkdir %T/test1 -# RUN: %{python} %S/../check_path.py dir %T/test1 > %t.out +# RUN: rm -rf %t.test1 +# RUN: mkdir %t.test1 +# RUN: %{python} %S/../check_path.py dir %t.test1 > %t.out # RUN: FileCheck --check-prefix=MAKE-DIR < %t.out %s -# RUN: cd %T/test1 && mkdir foo -# RUN: %{python} %S/../check_path.py dir %T/test1 > %t.out +# RUN: cd %t.test1 && mkdir foo +# RUN: %{python} %S/../check_path.py dir %t.test1 > %t.out # RUN: FileCheck --check-prefix=MAKE-DIR < %t.out %s -# RUN: cd %T && rm -rf %T/test1 -# RUN: %{python} %S/../check_path.py dir %T/test1 > %t.out +# RUN: cd .. && rm -rf %t.test1 +# RUN: %{python} %S/../check_path.py dir %t.test1 > %t.out # RUN: FileCheck --check-prefix=REMOVE-DIR < %t.out %s # # MAKE-DIR: True @@ -50,18 +50,18 @@ # # Check creating and removing multiple folders and rm * operation. # -# RUN: rm -rf %T/test -# RUN: mkdir -p %T/test/test1 %T/test/test2 -# RUN: %{python} %S/../check_path.py dir %T/test %T/test/test1 %T/test/test2 > %t.out +# RUN: rm -rf %t.test +# RUN: mkdir -p %t.test/test1 %t.test/test2 +# RUN: %{python} %S/../check_path.py dir %t.test %t.test/test1 %t.test/test2 > %t.out # RUN: FileCheck --check-prefix=DIRS-EXIST < %t.out %s -# RUN: mkdir %T/test || true -# RUN: echo "create a temp file" > %T/test/temp.write -# RUN: echo "create a temp1 file" > %T/test/test1/temp1.write -# RUN: echo "create a temp2 file" > %T/test/test2/temp2.write -# RUN: %{python} %S/../check_path.py file %T/test/temp.write %T/test/test1/temp1.write %T/test/test2/temp2.write> %t.out +# RUN: mkdir %t.test || true +# RUN: echo "create a temp file" > %t.test/temp.write +# RUN: echo "create a temp1 file" > %t.test/test1/temp1.write +# RUN: echo "create a temp2 file" > %t.test/test2/temp2.write +# RUN: %{python} %S/../check_path.py file %t.test/temp.write %t.test/test1/temp1.write %t.test/test2/temp2.write> %t.out # RUN: FileCheck --check-prefix=FILES-EXIST < %t.out %s -# RUN: rm -r -f %T/* -# RUN: %{python} 
%S/../check_path.py dir %T/test > %t.out +# RUN: rm -r -f %t* +# RUN: %{python} %S/../check_path.py dir %t.test > %t.out # RUN: FileCheck --check-prefix=REMOVE-ALL < %t.out %s # # DIRS-EXIST: True @@ -81,7 +81,7 @@ # RUN: echo "hello-2" > %t1.stdout # RUN: diff %t.stdout %t1.stdout || true # -# RUN: mkdir -p %T/dir1 %T/dir2 -# RUN: cd %T/dir1 && echo "hello" > temp1.txt -# RUN: cd %T/dir2 && echo "hello" > temp2.txt -# RUN: diff temp2.txt ../dir1/temp1.txt +# RUN: mkdir -p %t.dir1 %t.dir2 +# RUN: cd %t.dir1 && echo "hello" > temp1.txt +# RUN: cd %t.dir2 && echo "hello" > temp2.txt +# RUN: diff temp2.txt ../%{t:stem}.tmp.dir1/temp1.txt diff --git a/llvm/utils/lit/tests/shtest-shell.py b/llvm/utils/lit/tests/shtest-shell.py index 498f6bb0adc11..38db1b75486cf 100644 --- a/llvm/utils/lit/tests/shtest-shell.py +++ b/llvm/utils/lit/tests/shtest-shell.py @@ -12,6 +12,10 @@ # CHECK: -- Testing: +# CHECK: UNRESOLVED: shtest-shell :: capital-t-error-message.txt +# CHECK: *** TEST 'shtest-shell :: capital-t-error-message.txt' FAILED *** +# CHECK: ValueError: %T is no longer supported. Please create directories with names based on %t. 
+ # CHECK: FAIL: shtest-shell :: colon-error.txt # CHECK: *** TEST 'shtest-shell :: colon-error.txt' FAILED *** # CHECK: : @@ -633,5 +637,5 @@ # CHECK: *** # CHECK: PASS: shtest-shell :: valid-shell.txt -# CHECK: Unresolved Tests (1) +# CHECK: Unresolved Tests (2) # CHECK: Failed Tests (37) diff --git a/mlir/docs/DefiningDialects/Operations.md b/mlir/docs/DefiningDialects/Operations.md index f988bebea1223..7c1be84727476 100644 --- a/mlir/docs/DefiningDialects/Operations.md +++ b/mlir/docs/DefiningDialects/Operations.md @@ -1649,6 +1649,15 @@ inline constexpr MyBitEnum operator&(MyBitEnum a, MyBitEnum b) { inline constexpr MyBitEnum operator^(MyBitEnum a, MyBitEnum b) { return static_cast(static_cast(a) ^ static_cast(b)); } +inline constexpr MyBitEnum &operator|=(MyBitEnum &a, MyBitEnum b) { + return a = a | b; +} +inline constexpr MyBitEnum &operator&=(MyBitEnum &a, MyBitEnum b) { + return a = a & b; +} +inline constexpr MyBitEnum &operator^=(MyBitEnum &a, MyBitEnum b) { + return a = a ^ b; +} inline constexpr MyBitEnum operator~(MyBitEnum bits) { // Ensure only bits that can be present in the enum are set return static_cast(~static_cast(bits) & static_cast(15u)); diff --git a/mlir/examples/standalone/include/Standalone/StandalonePasses.td b/mlir/examples/standalone/include/Standalone/StandalonePasses.td index 4cb2be02e4a20..d5aad34f2f457 100644 --- a/mlir/examples/standalone/include/Standalone/StandalonePasses.td +++ b/mlir/examples/standalone/include/Standalone/StandalonePasses.td @@ -1,4 +1,4 @@ -//===- StandalonePsss.td - Standalone dialect passes -------*- tablegen -*-===// +//===- StandalonePasses.td - Standalone dialect passes -------*- tablegen -*-===// // // This file is licensed under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
diff --git a/mlir/include/mlir-c/Dialect/LLVM.h b/mlir/include/mlir-c/Dialect/LLVM.h index 65b14254e4492..c1ade9ed8617c 100644 --- a/mlir/include/mlir-c/Dialect/LLVM.h +++ b/mlir/include/mlir-c/Dialect/LLVM.h @@ -306,7 +306,8 @@ typedef enum MlirLLVMDINameTableKind MlirLLVMDINameTableKind; MLIR_CAPI_EXPORTED MlirAttribute mlirLLVMDICompileUnitAttrGet( MlirContext ctx, MlirAttribute id, unsigned int sourceLanguage, MlirAttribute file, MlirAttribute producer, bool isOptimized, - MlirLLVMDIEmissionKind emissionKind, MlirLLVMDINameTableKind nameTableKind); + MlirLLVMDIEmissionKind emissionKind, MlirLLVMDINameTableKind nameTableKind, + MlirAttribute splitDebugFilename); /// Creates a LLVM DIFlags attribute. MLIR_CAPI_EXPORTED MlirAttribute mlirLLVMDIFlagsAttrGet(MlirContext ctx, diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td index 75bce6b0a0e54..147f8c2040049 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td @@ -425,17 +425,19 @@ def LLVM_DICompileUnitAttr : LLVM_Attr<"DICompileUnit", "di_compile_unit", OptionalParameter<"StringAttr">:$producer, "bool":$isOptimized, "DIEmissionKind":$emissionKind, - OptionalParameter<"DINameTableKind">:$nameTableKind + OptionalParameter<"DINameTableKind">:$nameTableKind, + OptionalParameter<"StringAttr">:$splitDebugFilename ); let builders = [ AttrBuilderWithInferredContext<(ins "DistinctAttr":$id, "unsigned":$sourceLanguage, "DIFileAttr":$file, "StringAttr":$producer, "bool":$isOptimized, "DIEmissionKind":$emissionKind, - CArg<"DINameTableKind", "DINameTableKind::Default">:$nameTableKind + CArg<"DINameTableKind", "DINameTableKind::Default">:$nameTableKind, + CArg<"StringAttr", "{}">:$splitDebugFilename ), [{ return $_get(id.getContext(), id, sourceLanguage, file, producer, - isOptimized, emissionKind, nameTableKind); + isOptimized, emissionKind, nameTableKind, splitDebugFilename); }]> ]; let 
assemblyFormat = "`<` struct(params) `>`"; diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h index 41670249936e6..7266687584b38 100644 --- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h @@ -1858,6 +1858,7 @@ void populateDecomposePadPatterns(RewritePatternSet &patterns); /// Populates patterns to transform linalg.conv_2d_xxx operations into /// linalg.generic (for img2col packing) and linalg.matmul. +/// Note: currently limited to Tensor semantics only. /// \see rewriteInIm2Col for more details. void populateConvertConv2DToImg2ColPatterns(RewritePatternSet &patterns); diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td index 9dbe6897a3304..f693a0737e0fc 100644 --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td @@ -230,14 +230,24 @@ def TargetRegionFlagsNone : I32BitEnumAttrCaseNone<"none">; def TargetRegionFlagsGeneric : I32BitEnumAttrCaseBit<"generic", 0>; def TargetRegionFlagsSpmd : I32BitEnumAttrCaseBit<"spmd", 1>; def TargetRegionFlagsTripCount : I32BitEnumAttrCaseBit<"trip_count", 2>; +def TargetRegionFlagsNoLoop : I32BitEnumAttrCaseBit<"no_loop", 3>; def TargetRegionFlags : OpenMP_BitEnumAttr< "TargetRegionFlags", - "target region property flags", [ + "These flags describe properties of the target kernel. " + "TargetRegionFlagsGeneric - denotes generic kernel. " + "TargetRegionFlagsSpmd - denotes SPMD kernel. " + "TargetRegionFlagsNoLoop - denotes kernel where " + "num_teams * num_threads >= loop_trip_count. It allows the conversion " + "of loops into sequential code by ensuring that each team/thread " + "executes at most one iteration. 
" + "TargetRegionFlagsTripCount - checks if the loop trip count should be " + "calculated.", [ TargetRegionFlagsNone, TargetRegionFlagsGeneric, TargetRegionFlagsSpmd, - TargetRegionFlagsTripCount + TargetRegionFlagsTripCount, + TargetRegionFlagsNoLoop ]>; //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td index 553d69cc21d17..93ab120339d55 100644 --- a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td +++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td @@ -282,8 +282,7 @@ def Tosa_Shape : Tosa_Type<"shape", "shape"> { !tosa.shape<0> ``` }]; - let parameters = (ins "int" : $rank); - let builders = [TypeBuilder<(ins "int" : $rank)>]; + let parameters = (ins "int":$rank); let assemblyFormat = "`<` $rank `>`"; let genVerifyDecl = 1; diff --git a/mlir/include/mlir/TableGen/Class.h b/mlir/include/mlir/TableGen/Class.h index 10349676625d1..e6bedc7cc896d 100644 --- a/mlir/include/mlir/TableGen/Class.h +++ b/mlir/include/mlir/TableGen/Class.h @@ -789,6 +789,10 @@ class Class { std::forward(args)...); } + const std::vector> &getMethods() const { + return methods; + } + /// Add a new field to the class. Class fields added this way are always /// private. template diff --git a/mlir/include/mlir/Transforms/FoldUtils.h b/mlir/include/mlir/Transforms/FoldUtils.h index 2e7a6fe3e362c..ee89e8d0e7c3f 100644 --- a/mlir/include/mlir/Transforms/FoldUtils.h +++ b/mlir/include/mlir/Transforms/FoldUtils.h @@ -40,7 +40,10 @@ class OperationFolder { /// deduplicated constants. If successful, replaces `op`'s uses with /// folded results, and returns success. If the op was completely folded it is /// erased. If it is just updated in place, `inPlaceUpdate` is set to true. 
- LogicalResult tryToFold(Operation *op, bool *inPlaceUpdate = nullptr); + /// On success() and when in-place, the folder is invoked until + /// `maxIterations` is reached (default INT_MAX). + LogicalResult tryToFold(Operation *op, bool *inPlaceUpdate = nullptr, + int maxIterations = INT_MAX); /// Tries to fold a pre-existing constant operation. `constValue` represents /// the value of the constant, and can be optionally passed if the value is @@ -82,7 +85,10 @@ class OperationFolder { /// Tries to perform folding on the given `op`. If successful, populates /// `results` with the results of the folding. - LogicalResult tryToFold(Operation *op, SmallVectorImpl &results); + /// On success() and when in-place, the folder is invoked until + /// `maxIterations` is reached (default INT_MAX). + LogicalResult tryToFold(Operation *op, SmallVectorImpl &results, + int maxIterations = INT_MAX); /// Try to process a set of fold results. Populates `results` on success, /// otherwise leaves it unchanged. 
diff --git a/mlir/lib/Analysis/DataFlow/LivenessAnalysis.cpp b/mlir/lib/Analysis/DataFlow/LivenessAnalysis.cpp index 65df355216f74..d705d8d4c7819 100644 --- a/mlir/lib/Analysis/DataFlow/LivenessAnalysis.cpp +++ b/mlir/lib/Analysis/DataFlow/LivenessAnalysis.cpp @@ -109,19 +109,19 @@ LivenessAnalysis::visitOperation(Operation *op, ArrayRef operands, foundLiveResult = true; } LDBG() << "[visitOperation] Adding dependency for result: " << r - << " after op: " << *op; + << " after op: " << OpWithFlags(op, OpPrintingFlags().skipRegions()); addDependency(const_cast(r), getProgramPointAfter(op)); } return success(); } void LivenessAnalysis::visitBranchOperand(OpOperand &operand) { + Operation *op = operand.getOwner(); LDBG() << "Visiting branch operand: " << operand.get() - << " in op: " << *operand.getOwner(); + << " in op: " << OpWithFlags(op, OpPrintingFlags().skipRegions()); // We know (at the moment) and assume (for the future) that `operand` is a // non-forwarded branch operand of a `RegionBranchOpInterface`, // `BranchOpInterface`, `RegionBranchTerminatorOpInterface` or return-like op. - Operation *op = operand.getOwner(); assert((isa(op) || isa(op) || isa(op)) && "expected the op to be `RegionBranchOpInterface`, " @@ -146,12 +146,13 @@ void LivenessAnalysis::visitBranchOperand(OpOperand &operand) { // Therefore, if the result value is live, we conservatively consider the // non-forwarded operand of the region branch operation with result may // live and record all result. 
- for (Value result : op->getResults()) { + for (auto [resultIndex, result] : llvm::enumerate(op->getResults())) { if (getLatticeElement(result)->isLive) { mayLive = true; - LDBG() << "[visitBranchOperand] Non-forwarded branch " - "operand may be live due to live result: " - << result; + LDBG() << "[visitBranchOperand] Non-forwarded branch operand may be " + "live due to live result #" + << resultIndex << ": " + << OpWithFlags(op, OpPrintingFlags().skipRegions()); break; } } @@ -233,7 +234,8 @@ void LivenessAnalysis::visitBranchOperand(OpOperand &operand) { SmallVector resultsLiveness; for (const Value result : op->getResults()) resultsLiveness.push_back(getLatticeElement(result)); - LDBG() << "Visiting operation for non-forwarded branch operand: " << *op; + LDBG() << "Visiting operation for non-forwarded branch operand: " + << OpWithFlags(op, OpPrintingFlags().skipRegions()); (void)visitOperation(op, operandLiveness, resultsLiveness); // We also visit the parent op with the parent's results and this operand if @@ -299,8 +301,6 @@ RunLivenessAnalysis::RunLivenessAnalysis(Operation *op) { // The framework doesn't visit operations in dead blocks, so we need to // explicitly mark them as dead. 
op->walk([&](Operation *op) { - if (op->getNumResults() == 0) - return; for (auto result : llvm::enumerate(op->getResults())) { if (getLiveness(result.value())) continue; diff --git a/mlir/lib/Bindings/Python/DialectLLVM.cpp b/mlir/lib/Bindings/Python/DialectLLVM.cpp index 55b9331270cdc..38de4a0e329a0 100644 --- a/mlir/lib/Bindings/Python/DialectLLVM.cpp +++ b/mlir/lib/Bindings/Python/DialectLLVM.cpp @@ -33,21 +33,37 @@ static void populateDialectLLVMSubmodule(const nanobind::module_ &m) { auto llvmStructType = mlir_type_subclass(m, "StructType", mlirTypeIsALLVMStructType); - llvmStructType.def_classmethod( - "get_literal", - [](const nb::object &cls, const std::vector &elements, - bool packed, MlirLocation loc) { - CollectDiagnosticsToStringScope scope(mlirLocationGetContext(loc)); - - MlirType type = mlirLLVMStructTypeLiteralGetChecked( - loc, elements.size(), elements.data(), packed); - if (mlirTypeIsNull(type)) { - throw nb::value_error(scope.takeMessage().c_str()); - } - return cls(type); - }, - "cls"_a, "elements"_a, nb::kw_only(), "packed"_a = false, - "loc"_a = nb::none()); + llvmStructType + .def_classmethod( + "get_literal", + [](const nb::object &cls, const std::vector &elements, + bool packed, MlirLocation loc) { + CollectDiagnosticsToStringScope scope(mlirLocationGetContext(loc)); + + MlirType type = mlirLLVMStructTypeLiteralGetChecked( + loc, elements.size(), elements.data(), packed); + if (mlirTypeIsNull(type)) { + throw nb::value_error(scope.takeMessage().c_str()); + } + return cls(type); + }, + "cls"_a, "elements"_a, nb::kw_only(), "packed"_a = false, + "loc"_a = nb::none()) + .def_classmethod( + "get_literal_unchecked", + [](const nb::object &cls, const std::vector &elements, + bool packed, MlirContext context) { + CollectDiagnosticsToStringScope scope(context); + + MlirType type = mlirLLVMStructTypeLiteralGet( + context, elements.size(), elements.data(), packed); + if (mlirTypeIsNull(type)) { + throw nb::value_error(scope.takeMessage().c_str()); 
+ } + return cls(type); + }, + "cls"_a, "elements"_a, nb::kw_only(), "packed"_a = false, + "context"_a = nb::none()); llvmStructType.def_classmethod( "get_identified", diff --git a/mlir/lib/Bindings/Python/IRAttributes.cpp b/mlir/lib/Bindings/Python/IRAttributes.cpp index c77653f97e6dd..045c0fbf4630f 100644 --- a/mlir/lib/Bindings/Python/IRAttributes.cpp +++ b/mlir/lib/Bindings/Python/IRAttributes.cpp @@ -575,6 +575,18 @@ class PyFloatAttribute : public PyConcreteAttribute { }, nb::arg("type"), nb::arg("value"), nb::arg("loc") = nb::none(), "Gets an uniqued float point attribute associated to a type"); + c.def_static( + "get_unchecked", + [](PyType &type, double value, DefaultingPyMlirContext context) { + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirAttribute attr = + mlirFloatAttrDoubleGet(context.get()->get(), type, value); + if (mlirAttributeIsNull(attr)) + throw MLIRError("Invalid attribute", errors.take()); + return PyFloatAttribute(type.getContext(), attr); + }, + nb::arg("type"), nb::arg("value"), nb::arg("context") = nb::none(), + "Gets an uniqued float point attribute associated to a type"); c.def_static( "get_f32", [](double value, DefaultingPyMlirContext context) { diff --git a/mlir/lib/Bindings/Python/IRTypes.cpp b/mlir/lib/Bindings/Python/IRTypes.cpp index 07dc00521833f..3488d92250b45 100644 --- a/mlir/lib/Bindings/Python/IRTypes.cpp +++ b/mlir/lib/Bindings/Python/IRTypes.cpp @@ -639,11 +639,16 @@ class PyVectorType : public PyConcreteType { using PyConcreteType::PyConcreteType; static void bindDerived(ClassTy &c) { - c.def_static("get", &PyVectorType::get, nb::arg("shape"), + c.def_static("get", &PyVectorType::getChecked, nb::arg("shape"), nb::arg("element_type"), nb::kw_only(), nb::arg("scalable") = nb::none(), nb::arg("scalable_dims") = nb::none(), nb::arg("loc") = nb::none(), "Create a vector type") + .def_static("get_unchecked", &PyVectorType::get, nb::arg("shape"), + nb::arg("element_type"), nb::kw_only(), + nb::arg("scalable") = 
nb::none(), + nb::arg("scalable_dims") = nb::none(), + nb::arg("context") = nb::none(), "Create a vector type") .def_prop_ro( "scalable", [](MlirType self) { return mlirVectorTypeIsScalable(self); }) @@ -658,10 +663,11 @@ class PyVectorType : public PyConcreteType { } private: - static PyVectorType get(std::vector shape, PyType &elementType, - std::optional scalable, - std::optional> scalableDims, - DefaultingPyLocation loc) { + static PyVectorType + getChecked(std::vector shape, PyType &elementType, + std::optional scalable, + std::optional> scalableDims, + DefaultingPyLocation loc) { if (scalable && scalableDims) { throw nb::value_error("'scalable' and 'scalable_dims' kwargs " "are mutually exclusive."); @@ -696,6 +702,42 @@ class PyVectorType : public PyConcreteType { throw MLIRError("Invalid type", errors.take()); return PyVectorType(elementType.getContext(), type); } + + static PyVectorType get(std::vector shape, PyType &elementType, + std::optional scalable, + std::optional> scalableDims, + DefaultingPyMlirContext context) { + if (scalable && scalableDims) { + throw nb::value_error("'scalable' and 'scalable_dims' kwargs " + "are mutually exclusive."); + } + + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirType type; + if (scalable) { + if (scalable->size() != shape.size()) + throw nb::value_error("Expected len(scalable) == len(shape)."); + + SmallVector scalableDimFlags = llvm::to_vector(llvm::map_range( + *scalable, [](const nb::handle &h) { return nb::cast(h); })); + type = mlirVectorTypeGetScalable(shape.size(), shape.data(), + scalableDimFlags.data(), elementType); + } else if (scalableDims) { + SmallVector scalableDimFlags(shape.size(), false); + for (int64_t dim : *scalableDims) { + if (static_cast(dim) >= scalableDimFlags.size() || dim < 0) + throw nb::value_error("Scalable dimension index out of bounds."); + scalableDimFlags[dim] = true; + } + type = mlirVectorTypeGetScalable(shape.size(), shape.data(), + scalableDimFlags.data(), 
elementType); + } else { + type = mlirVectorTypeGet(shape.size(), shape.data(), elementType); + } + if (mlirTypeIsNull(type)) + throw MLIRError("Invalid type", errors.take()); + return PyVectorType(elementType.getContext(), type); + } }; /// Ranked Tensor Type subclass - RankedTensorType. @@ -724,6 +766,22 @@ class PyRankedTensorType nb::arg("shape"), nb::arg("element_type"), nb::arg("encoding") = nb::none(), nb::arg("loc") = nb::none(), "Create a ranked tensor type"); + c.def_static( + "get_unchecked", + [](std::vector shape, PyType &elementType, + std::optional &encodingAttr, + DefaultingPyMlirContext context) { + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirType t = mlirRankedTensorTypeGet( + shape.size(), shape.data(), elementType, + encodingAttr ? encodingAttr->get() : mlirAttributeGetNull()); + if (mlirTypeIsNull(t)) + throw MLIRError("Invalid type", errors.take()); + return PyRankedTensorType(elementType.getContext(), t); + }, + nb::arg("shape"), nb::arg("element_type"), + nb::arg("encoding") = nb::none(), nb::arg("context") = nb::none(), + "Create a ranked tensor type"); c.def_prop_ro( "encoding", [](PyRankedTensorType &self) @@ -758,6 +816,17 @@ class PyUnrankedTensorType }, nb::arg("element_type"), nb::arg("loc") = nb::none(), "Create a unranked tensor type"); + c.def_static( + "get_unchecked", + [](PyType &elementType, DefaultingPyMlirContext context) { + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirType t = mlirUnrankedTensorTypeGet(elementType); + if (mlirTypeIsNull(t)) + throw MLIRError("Invalid type", errors.take()); + return PyUnrankedTensorType(elementType.getContext(), t); + }, + nb::arg("element_type"), nb::arg("context") = nb::none(), + "Create a unranked tensor type"); } }; @@ -790,6 +859,27 @@ class PyMemRefType : public PyConcreteType { nb::arg("shape"), nb::arg("element_type"), nb::arg("layout") = nb::none(), nb::arg("memory_space") = nb::none(), nb::arg("loc") = nb::none(), "Create a memref type") + 
.def_static( + "get_unchecked", + [](std::vector shape, PyType &elementType, + PyAttribute *layout, PyAttribute *memorySpace, + DefaultingPyMlirContext context) { + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirAttribute layoutAttr = + layout ? *layout : mlirAttributeGetNull(); + MlirAttribute memSpaceAttr = + memorySpace ? *memorySpace : mlirAttributeGetNull(); + MlirType t = + mlirMemRefTypeGet(elementType, shape.size(), shape.data(), + layoutAttr, memSpaceAttr); + if (mlirTypeIsNull(t)) + throw MLIRError("Invalid type", errors.take()); + return PyMemRefType(elementType.getContext(), t); + }, + nb::arg("shape"), nb::arg("element_type"), + nb::arg("layout") = nb::none(), + nb::arg("memory_space") = nb::none(), + nb::arg("context") = nb::none(), "Create a memref type") .def_prop_ro( "layout", [](PyMemRefType &self) -> nb::typed { @@ -858,6 +948,22 @@ class PyUnrankedMemRefType }, nb::arg("element_type"), nb::arg("memory_space").none(), nb::arg("loc") = nb::none(), "Create a unranked memref type") + .def_static( + "get_unchecked", + [](PyType &elementType, PyAttribute *memorySpace, + DefaultingPyMlirContext context) { + PyMlirContext::ErrorCapture errors(context->getRef()); + MlirAttribute memSpaceAttr = {}; + if (memorySpace) + memSpaceAttr = *memorySpace; + + MlirType t = mlirUnrankedMemRefTypeGet(elementType, memSpaceAttr); + if (mlirTypeIsNull(t)) + throw MLIRError("Invalid type", errors.take()); + return PyUnrankedMemRefType(elementType.getContext(), t); + }, + nb::arg("element_type"), nb::arg("memory_space").none(), + nb::arg("context") = nb::none(), "Create a unranked memref type") .def_prop_ro( "memory_space", [](PyUnrankedMemRefType &self) diff --git a/mlir/lib/CAPI/Dialect/LLVM.cpp b/mlir/lib/CAPI/Dialect/LLVM.cpp index 7a33046c6c872..eaad8a87aab9b 100644 --- a/mlir/lib/CAPI/Dialect/LLVM.cpp +++ b/mlir/lib/CAPI/Dialect/LLVM.cpp @@ -253,17 +253,16 @@ MlirAttribute mlirLLVMDIFileAttrGet(MlirContext ctx, MlirAttribute name, 
cast(unwrap(directory)))); } -MlirAttribute -mlirLLVMDICompileUnitAttrGet(MlirContext ctx, MlirAttribute id, - unsigned int sourceLanguage, MlirAttribute file, - MlirAttribute producer, bool isOptimized, - MlirLLVMDIEmissionKind emissionKind, - MlirLLVMDINameTableKind nameTableKind) { +MlirAttribute mlirLLVMDICompileUnitAttrGet( + MlirContext ctx, MlirAttribute id, unsigned int sourceLanguage, + MlirAttribute file, MlirAttribute producer, bool isOptimized, + MlirLLVMDIEmissionKind emissionKind, MlirLLVMDINameTableKind nameTableKind, + MlirAttribute splitDebugFilename) { return wrap(DICompileUnitAttr::get( unwrap(ctx), cast(unwrap(id)), sourceLanguage, cast(unwrap(file)), cast(unwrap(producer)), - isOptimized, DIEmissionKind(emissionKind), - DINameTableKind(nameTableKind))); + isOptimized, DIEmissionKind(emissionKind), DINameTableKind(nameTableKind), + cast(unwrap(splitDebugFilename)))); } MlirAttribute mlirLLVMDIFlagsAttrGet(MlirContext ctx, uint64_t value) { diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp index 1037e296c8128..a73afbcb6474b 100644 --- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp +++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp @@ -663,7 +663,7 @@ static IntegerAttr wrapNumericMemorySpace(MLIRContext *ctx, unsigned space) { /// Generates a symbol with 0-sized array type for dynamic shared memory usage, /// or uses existing symbol. 
-LLVM::GlobalOp getDynamicSharedMemorySymbol( +static LLVM::GlobalOp getDynamicSharedMemorySymbol( ConversionPatternRewriter &rewriter, gpu::GPUModuleOp moduleOp, gpu::DynamicSharedMemoryOp op, const LLVMTypeConverter *typeConverter, MemRefType memrefType, unsigned alignmentBit) { diff --git a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp index 49d06497dbeea..f44552c4556c2 100644 --- a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp +++ b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp @@ -512,7 +512,7 @@ calculateMemoryRequirements(Value accessedPtr, bool isNontemporal, if (!sizeInBytes.has_value()) return failure(); - memoryAccess = memoryAccess | spirv::MemoryAccess::Aligned; + memoryAccess |= spirv::MemoryAccess::Aligned; auto memAccessAttr = spirv::MemoryAccessAttr::get(ctx, memoryAccess); auto alignmentValue = preferredAlignment ? preferredAlignment : *sizeInBytes; auto alignment = IntegerAttr::get(IntegerType::get(ctx, 32), alignmentValue); diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp index 79cb49a4f7dbc..d6a262275be3d 100644 --- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp +++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp @@ -741,7 +741,7 @@ creatLdMatrixCompatibleLoads(RewriterBase &rewriter, vector::TransferReadOp op, } // Adjust the load offset. 
- auto laneId = gpu::LaneIdOp::create(rewriter, loc, /*upperBound=*/nullptr); + auto laneId = gpu::LaneIdOp::create(rewriter, loc, /*upper_bound=*/nullptr); FailureOr offsets = nvgpu::getLaneIdToLdMatrixMatrixCoord(rewriter, loc, *params); if (failed(offsets)) { @@ -781,7 +781,7 @@ createNonLdMatrixLoads(RewriterBase &rewriter, vector::TransferReadOp op, "conversion to distributed non-ldmatrix compatible load"); } - Value laneId = gpu::LaneIdOp::create(rewriter, loc, /*upperBound=*/nullptr); + Value laneId = gpu::LaneIdOp::create(rewriter, loc, /*upper_bound=*/nullptr); // This is the individual element type. Type loadedElType = regInfo->registerLLVMType; @@ -915,7 +915,7 @@ convertTransferWriteToStores(RewriterBase &rewriter, vector::TransferWriteOp op, return rewriter.notifyMatchFailure(op, "not mma sync reg info"); VectorType vectorType = getMmaSyncVectorOperandType(*regInfo); - Value laneId = gpu::LaneIdOp::create(rewriter, loc, /*upperBound=*/nullptr); + Value laneId = gpu::LaneIdOp::create(rewriter, loc, /*upper_bound=*/nullptr); for (unsigned i = 0; i < vectorType.getShape()[0]; i++) { Value logicalValueId = arith::ConstantOp::create( diff --git a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp index 04f56b9691fd1..5061a4454a7fd 100644 --- a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp +++ b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp @@ -753,7 +753,7 @@ struct VectorLoadOpConverter final spirv::MemoryAccessAttr memoryAccessAttr; IntegerAttr alignmentAttr; if (alignment.has_value()) { - memoryAccess = memoryAccess | spirv::MemoryAccess::Aligned; + memoryAccess |= spirv::MemoryAccess::Aligned; memoryAccessAttr = spirv::MemoryAccessAttr::get(rewriter.getContext(), memoryAccess); alignmentAttr = rewriter.getI32IntegerAttr(alignment.value()); @@ -822,7 +822,7 @@ struct VectorStoreOpConverter final spirv::MemoryAccessAttr memoryAccessAttr; IntegerAttr alignmentAttr; if 
(alignment.has_value()) { - memoryAccess = memoryAccess | spirv::MemoryAccess::Aligned; + memoryAccess |= spirv::MemoryAccess::Aligned; memoryAccessAttr = spirv::MemoryAccessAttr::get(rewriter.getContext(), memoryAccess); alignmentAttr = rewriter.getI32IntegerAttr(alignment.value()); diff --git a/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp b/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp index 5ceae9b16af20..67573c4ee6061 100644 --- a/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp +++ b/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp @@ -77,7 +77,7 @@ static void addScopeToFunction(LLVM::LLVMFuncOp llvmFunc, auto subprogramFlags = LLVM::DISubprogramFlags::Optimized; if (!llvmFunc.isExternal()) { id = DistinctAttr::create(UnitAttr::get(context)); - subprogramFlags = subprogramFlags | LLVM::DISubprogramFlags::Definition; + subprogramFlags |= LLVM::DISubprogramFlags::Definition; } else { compileUnitAttr = {}; } diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp index 108abe800b13e..ebc4dcf6bbcb5 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertConv2DToImg2Col.cpp @@ -20,6 +20,7 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" +#include #include namespace mlir { @@ -124,6 +125,10 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcHwcfOp convOp) { auto filterType = cast(convOp.getInputs()[1].getType()); auto outputType = cast(convOp.getOutputs()[0].getType()); + if (!convOp.hasPureTensorSemantics()) + return rewriter.notifyMatchFailure( + convOp, "expected op to have pure tensor semantics"); + if (!filterType.hasStaticShape()) return rewriter.notifyMatchFailure( convOp, "expected a static shape for the filter"); @@ -155,10 +160,15 @@ rewriteInIm2Col(RewriterBase &rewriter, 
linalg::Conv2DNhwcHwcfOp convOp) { Location loc = convOp.getLoc(); + assert(isa(filterType) && + "expected filter type to be a ranked tensor"); + auto tensorFilterType = cast(filterType); + // Reshape output and filter to the LHS and result of a (B)MNK matmul. SmallVector filterReassocIndices = {{0, 1, 2}, {3}}; auto reshapedFilterType = - RankedTensorType::get({fh * fw * ic, oc}, filterType.getElementType()); + RankedTensorType::get({fh * fw * ic, oc}, filterType.getElementType(), + tensorFilterType.getEncoding()); Value reshapedFilter = tensor::CollapseShapeOp::create( rewriter, loc, reshapedFilterType, filter, filterReassocIndices); @@ -253,6 +263,10 @@ rewriteInIm2Col(RewriterBase &rewriter, auto filterType = cast(convOp.getInputs()[1].getType()); auto outputType = cast(convOp.getOutputs()[0].getType()); + if (!convOp.hasPureTensorSemantics()) + return rewriter.notifyMatchFailure( + convOp, "expected op to have pure tensor semantics"); + if (!filterType.hasStaticShape()) return rewriter.notifyMatchFailure( convOp, "expected a static shape for the filter"); @@ -404,6 +418,10 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp) { auto filterType = cast(convOp.getInputs()[1].getType()); auto outputType = cast(convOp.getOutputs()[0].getType()); + if (!convOp.hasPureTensorSemantics()) + return rewriter.notifyMatchFailure( + convOp, "expected op to have pure tensor semantics"); + if (!filterType.hasStaticShape()) return rewriter.notifyMatchFailure( convOp, "expected a static shape for the filter"); @@ -435,9 +453,14 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp) { auto loc = convOp.getLoc(); MLIRContext *context = rewriter.getContext(); + assert(isa(filterType) && + "expected filter type to be a ranked tensor"); + auto tensorFilterType = cast(filterType); + SmallVector filterReassocIndices = {{0}, {1, 2, 3}}; auto reshapedFilterType = - RankedTensorType::get({oc, ic * fh * fw}, inputType.getElementType()); + 
RankedTensorType::get({oc, ic * fh * fw}, inputType.getElementType(), + tensorFilterType.getEncoding()); Value reshapedFilter = tensor::CollapseShapeOp::create( rewriter, loc, reshapedFilterType, filter, filterReassocIndices); @@ -529,6 +552,10 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp) { auto filterType = cast(convOp.getInputs()[1].getType()); auto outputType = cast(convOp.getOutputs()[0].getType()); + if (!convOp.hasPureTensorSemantics()) + return rewriter.notifyMatchFailure( + convOp, "expected op to have pure tensor semantics"); + if (!filterType.hasStaticShape()) return rewriter.notifyMatchFailure( convOp, "expected a static shape for the filter"); @@ -560,11 +587,16 @@ rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp) { Location loc = convOp.getLoc(); + assert(isa(filterType) && + "expected filter type to be a ranked tensor"); + auto tensorFilterType = cast(filterType); + // Reshape output and filter to the LHS and result of a "row-wise" matrix // multiplication. 
SmallVector filterReassocIndices = {{0}, {1, 2, 3}}; auto reshapedFilterType = - RankedTensorType::get({oc, fh * fw * ic}, filterType.getElementType()); + RankedTensorType::get({oc, fh * fw * ic}, filterType.getElementType(), + tensorFilterType.getEncoding()); Value reshapedFilter = tensor::CollapseShapeOp::create( rewriter, loc, reshapedFilterType, filter, filterReassocIndices); diff --git a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp index 8942670767231..0956c5d771394 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp @@ -141,7 +141,7 @@ SmallVector linalg::computePaddedShape( projectedDims.flip(paddingDim); AffineMap projectedMap = mlir::projectDims(partialIndexingMap, projectedDims, - /*compressDims=*/true); + /*compressDimsFlag=*/true); // If we are padding to the next multiple of, compose with ceil(sz) * sz. OpFoldResult paddingDimOfr; diff --git a/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp b/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp index 3de9c3898c713..6200366cded29 100644 --- a/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp +++ b/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp @@ -191,7 +191,7 @@ computeSuffixProductIRBlock(Location loc, OpBuilder &builder, } MemrefValue skipFullyAliasingOperations(MemrefValue source) { - while (auto op = source.getDefiningOp()) { + while (auto *op = source.getDefiningOp()) { if (auto subViewOp = dyn_cast(op); subViewOp && subViewOp.hasZeroOffset() && subViewOp.hasUnitStride()) { // A `memref.subview` with an all zero offset, and all unit strides, still @@ -208,7 +208,7 @@ MemrefValue skipFullyAliasingOperations(MemrefValue source) { } MemrefValue skipViewLikeOps(MemrefValue source) { - while (auto op = source.getDefiningOp()) { + while (auto *op = source.getDefiningOp()) { if (auto viewLike = dyn_cast(op)) { if (source == viewLike.getViewDest()) { source = 
cast(viewLike.getViewSource()); diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp index 3d70e28ed23ab..f01ad05a778ec 100644 --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -2111,6 +2111,31 @@ Operation *TargetOp::getInnermostCapturedOmpOp() { }); } +/// Check if we can promote SPMD kernel to No-Loop kernel. +static bool canPromoteToNoLoop(Operation *capturedOp, TeamsOp teamsOp, + WsloopOp *wsLoopOp) { + // num_teams clause can break no-loop teams/threads assumption. + if (teamsOp.getNumTeamsUpper()) + return false; + + // Reduction kernels are slower in no-loop mode. + if (teamsOp.getNumReductionVars()) + return false; + if (wsLoopOp->getNumReductionVars()) + return false; + + // Check if the user allows the promotion of kernels to no-loop mode. + OffloadModuleInterface offloadMod = + capturedOp->getParentOfType(); + if (!offloadMod) + return false; + auto ompFlags = offloadMod.getFlags(); + if (!ompFlags) + return false; + return ompFlags.getAssumeTeamsOversubscription() && + ompFlags.getAssumeThreadsOversubscription(); +} + TargetRegionFlags TargetOp::getKernelExecFlags(Operation *capturedOp) { // A non-null captured op is only valid if it resides inside of a TargetOp // and is the result of calling getInnermostCapturedOmpOp() on it. @@ -2139,7 +2164,8 @@ TargetRegionFlags TargetOp::getKernelExecFlags(Operation *capturedOp) { // Detect target-teams-distribute-parallel-wsloop[-simd]. 
if (numWrappers == 2) { - if (!isa(innermostWrapper)) + WsloopOp *wsloopOp = dyn_cast(innermostWrapper); + if (!wsloopOp) return TargetRegionFlags::generic; innermostWrapper = std::next(innermostWrapper); @@ -2150,12 +2176,17 @@ TargetRegionFlags TargetOp::getKernelExecFlags(Operation *capturedOp) { if (!isa_and_present(parallelOp)) return TargetRegionFlags::generic; - Operation *teamsOp = parallelOp->getParentOp(); - if (!isa_and_present(teamsOp)) + TeamsOp teamsOp = dyn_cast(parallelOp->getParentOp()); + if (!teamsOp) return TargetRegionFlags::generic; - if (teamsOp->getParentOp() == targetOp.getOperation()) - return TargetRegionFlags::spmd | TargetRegionFlags::trip_count; + if (teamsOp->getParentOp() == targetOp.getOperation()) { + TargetRegionFlags result = + TargetRegionFlags::spmd | TargetRegionFlags::trip_count; + if (canPromoteToNoLoop(capturedOp, teamsOp, wsloopOp)) + result = result | TargetRegionFlags::no_loop; + return result; + } } // Detect target-teams-distribute[-simd] and target-teams-loop. 
else if (isa(innermostWrapper)) { diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp index 20608f97611bb..81b5788d0b9b4 100644 --- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp +++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp @@ -23,7 +23,7 @@ namespace mlir { namespace xegpu { -bool isSharedMemory(const MemRefType &memrefTy) { +static bool isSharedMemory(const MemRefType &memrefTy) { Attribute attr = memrefTy.getMemorySpace(); if (auto intAttr = llvm::dyn_cast(attr)) return intAttr.getInt() == 3; @@ -340,7 +340,7 @@ LogicalResult CreateNdDescOp::verify() { return success(); } -ParseResult parseOptionalDynamicIndexList( +static ParseResult parseOptionalDynamicIndexList( OpAsmParser &parser, SmallVectorImpl &values, DenseI64ArrayAttr &integers, SmallVectorImpl *valueTypes = nullptr, @@ -378,9 +378,9 @@ ParseResult parseOptionalDynamicIndexList( return success(); } -void printOptionalDynamicIndexList(OpAsmPrinter &printer, Operation *op, - OperandRange values, - DenseI64ArrayAttr integers) { +static void printOptionalDynamicIndexList(OpAsmPrinter &printer, Operation *op, + OperandRange values, + DenseI64ArrayAttr integers) { if (!integers || integers.empty()) return; printDynamicIndexList(printer, op, values, integers, diff --git a/mlir/lib/IR/Builders.cpp b/mlir/lib/IR/Builders.cpp index 3d366276b4375..c84e760a3f363 100644 --- a/mlir/lib/IR/Builders.cpp +++ b/mlir/lib/IR/Builders.cpp @@ -14,6 +14,7 @@ #include "mlir/IR/IRMapping.h" #include "mlir/IR/Matchers.h" #include "llvm/ADT/SmallVectorExtras.h" +#include "llvm/Support/DebugLog.h" using namespace mlir; @@ -486,9 +487,25 @@ OpBuilder::tryFold(Operation *op, SmallVectorImpl &results, // Try to fold the operation. 
SmallVector foldResults; + LDBG() << "Trying to fold: " + << OpWithFlags(op, OpPrintingFlags().skipRegions()); + if (op->getName().getStringRef() == "vector.extract") { + Operation *parent = op->getParentOp(); + while (parent && parent->getName().getStringRef() != "spirv.func") + parent = parent->getParentOp(); + if (parent) + parent->dump(); + } if (failed(op->fold(foldResults))) return cleanupFailure(); + int count = 0; + do { + LDBG() << "Folded in place #" << count + << " times: " << OpWithFlags(op, OpPrintingFlags().skipRegions()); + count++; + } while (foldResults.empty() && succeeded(op->fold(foldResults))); + // An in-place fold does not require generation of any constants. if (foldResults.empty()) return success(); diff --git a/mlir/lib/Target/LLVMIR/DebugImporter.cpp b/mlir/lib/Target/LLVMIR/DebugImporter.cpp index 510ec6fe6456f..8b0326518770d 100644 --- a/mlir/lib/Target/LLVMIR/DebugImporter.cpp +++ b/mlir/lib/Target/LLVMIR/DebugImporter.cpp @@ -61,7 +61,8 @@ DICompileUnitAttr DebugImporter::translateImpl(llvm::DICompileUnit *node) { return DICompileUnitAttr::get( context, getOrCreateDistinctID(node), node->getSourceLanguage(), translate(node->getFile()), getStringAttrOrNull(node->getRawProducer()), - node->isOptimized(), emissionKind.value(), nameTableKind.value()); + node->isOptimized(), emissionKind.value(), nameTableKind.value(), + getStringAttrOrNull(node->getRawSplitDebugFilename())); } DICompositeTypeAttr DebugImporter::translateImpl(llvm::DICompositeType *node) { diff --git a/mlir/lib/Target/LLVMIR/DebugTranslation.cpp b/mlir/lib/Target/LLVMIR/DebugTranslation.cpp index a55445deddc2d..eeb87253e5eb8 100644 --- a/mlir/lib/Target/LLVMIR/DebugTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/DebugTranslation.cpp @@ -124,7 +124,9 @@ llvm::DICompileUnit *DebugTranslation::translateImpl(DICompileUnitAttr attr) { attr.getSourceLanguage(), translate(attr.getFile()), attr.getProducer() ? 
attr.getProducer().getValue() : "", attr.getIsOptimized(), - /*Flags=*/"", /*RV=*/0, /*SplitName=*/{}, + /*Flags=*/"", /*RV=*/0, + attr.getSplitDebugFilename() ? attr.getSplitDebugFilename().getValue() + : "", static_cast( attr.getEmissionKind()), 0, true, false, diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp index 4921a1990b6e8..53209a40665ae 100644 --- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp +++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp @@ -2591,13 +2591,34 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder, } builder.SetInsertPoint(*regionBlock, (*regionBlock)->begin()); + + // Check if we can generate no-loop kernel + bool noLoopMode = false; + omp::TargetOp targetOp = wsloopOp->getParentOfType(); + if (targetOp) { + Operation *targetCapturedOp = targetOp.getInnermostCapturedOmpOp(); + // We need this check because, without it, noLoopMode would be set to true + // for every omp.wsloop nested inside a no-loop SPMD target region, even if + // that loop is not the top-level SPMD one. 
+ if (loopOp == targetCapturedOp) { + omp::TargetRegionFlags kernelFlags = + targetOp.getKernelExecFlags(targetCapturedOp); + if (omp::bitEnumContainsAll(kernelFlags, + omp::TargetRegionFlags::spmd | + omp::TargetRegionFlags::no_loop) && + !omp::bitEnumContainsAny(kernelFlags, + omp::TargetRegionFlags::generic)) + noLoopMode = true; + } + } + llvm::OpenMPIRBuilder::InsertPointOrErrorTy wsloopIP = ompBuilder->applyWorkshareLoop( ompLoc.DL, loopInfo, allocaIP, loopNeedsBarrier, convertToScheduleKind(schedule), chunk, isSimd, scheduleMod == omp::ScheduleModifier::monotonic, scheduleMod == omp::ScheduleModifier::nonmonotonic, isOrdered, - workshareLoopType); + workshareLoopType, noLoopMode); if (failed(handleError(wsloopIP, opInst))) return failure(); @@ -3595,8 +3616,10 @@ getDeclareTargetRefPtrSuffix(LLVM::GlobalOp globalOp, llvm::StringRef(loc.getFilename()), loc.getLine()); }; + auto vfs = llvm::vfs::getRealFileSystem(); os << llvm::format( - "_%x", ompBuilder.getTargetEntryUniqueInfo(fileInfoCallBack).FileID); + "_%x", + ompBuilder.getTargetEntryUniqueInfo(fileInfoCallBack, *vfs).FileID); } os << "_decl_tgt_ref_ptr"; @@ -5425,6 +5448,12 @@ initTargetDefaultAttrs(omp::TargetOp targetOp, Operation *capturedOp, ? 
llvm::omp::OMP_TGT_EXEC_MODE_GENERIC_SPMD : llvm::omp::OMP_TGT_EXEC_MODE_GENERIC : llvm::omp::OMP_TGT_EXEC_MODE_SPMD; + if (omp::bitEnumContainsAll(kernelFlags, + omp::TargetRegionFlags::spmd | + omp::TargetRegionFlags::no_loop) && + !omp::bitEnumContainsAny(kernelFlags, omp::TargetRegionFlags::generic)) + attrs.ExecFlags = llvm::omp::OMP_TGT_EXEC_MODE_SPMD_NO_LOOP; + attrs.MinTeams = minTeamsVal; attrs.MaxTeams.front() = maxTeamsVal; attrs.MinThreads = 1; @@ -5888,10 +5917,12 @@ convertDeclareTargetAttr(Operation *op, mlir::omp::DeclareTargetAttr attribute, lineNo); }; + auto vfs = llvm::vfs::getRealFileSystem(); + ompBuilder->registerTargetGlobalVariable( captureClause, deviceClause, isDeclaration, isExternallyVisible, - ompBuilder->getTargetEntryUniqueInfo(fileInfoCallBack), mangledName, - generatedRefs, /*OpenMPSimd*/ false, targetTriple, + ompBuilder->getTargetEntryUniqueInfo(fileInfoCallBack, *vfs), + mangledName, generatedRefs, /*OpenMPSimd*/ false, targetTriple, /*GlobalInitializer*/ nullptr, /*VariableLinkage*/ nullptr, gVal->getType(), gVal); @@ -5901,9 +5932,9 @@ convertDeclareTargetAttr(Operation *op, mlir::omp::DeclareTargetAttr attribute, ompBuilder->Config.hasRequiresUnifiedSharedMemory())) { ompBuilder->getAddrOfDeclareTargetVar( captureClause, deviceClause, isDeclaration, isExternallyVisible, - ompBuilder->getTargetEntryUniqueInfo(fileInfoCallBack), mangledName, - generatedRefs, /*OpenMPSimd*/ false, targetTriple, gVal->getType(), - /*GlobalInitializer*/ nullptr, + ompBuilder->getTargetEntryUniqueInfo(fileInfoCallBack, *vfs), + mangledName, generatedRefs, /*OpenMPSimd*/ false, targetTriple, + gVal->getType(), /*GlobalInitializer*/ nullptr, /*VariableLinkage*/ nullptr); } } diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp index 5d6c8c8b79489..9603813e059d3 100644 --- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp +++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp @@ -1374,7 +1374,7 @@ LogicalResult 
ModuleImport::convertAlias(llvm::GlobalAlias *alias) { AliasOp aliasOp = AliasOp::create(builder, mlirModule.getLoc(), type, convertLinkageFromLLVM(alias->getLinkage()), alias->getName(), - /*dso_local=*/alias->isDSOLocal(), + /*dsoLocal=*/alias->isDSOLocal(), /*thread_local=*/alias->isThreadLocal(), /*attrs=*/ArrayRef()); globalInsertionOp = aliasOp; @@ -1507,8 +1507,8 @@ LogicalResult ModuleImport::convertGlobal(llvm::GlobalVariable *globalVar) { GlobalOp globalOp = GlobalOp::create( builder, mlirModule.getLoc(), type, globalVar->isConstant(), convertLinkageFromLLVM(globalVar->getLinkage()), StringRef(globalName), - valueAttr, alignment, /*addr_space=*/globalVar->getAddressSpace(), - /*dso_local=*/globalVar->isDSOLocal(), + valueAttr, alignment, /*addrSpace=*/globalVar->getAddressSpace(), + /*dsoLocal=*/globalVar->isDSOLocal(), /*thread_local=*/globalVar->isThreadLocal(), /*comdat=*/SymbolRefAttr(), /*attrs=*/ArrayRef(), /*dbgExprs=*/globalExpressionAttrs); globalInsertionOp = globalOp; diff --git a/mlir/lib/Transforms/Mem2Reg.cpp b/mlir/lib/Transforms/Mem2Reg.cpp index d36a3c1362c19..b3057129fb9fd 100644 --- a/mlir/lib/Transforms/Mem2Reg.cpp +++ b/mlir/lib/Transforms/Mem2Reg.cpp @@ -286,7 +286,7 @@ LogicalResult MemorySlotPromotionAnalyzer::computeBlockingUses( mlir::getForwardSlice(slot.ptr, &forwardSlice); for (Operation *user : forwardSlice) { // If the next operation has no blocking uses, everything is fine. 
- auto it = userToBlockingUses.find(user); + auto *it = userToBlockingUses.find(user); if (it == userToBlockingUses.end()) continue; diff --git a/mlir/lib/Transforms/RemoveDeadValues.cpp b/mlir/lib/Transforms/RemoveDeadValues.cpp index 0e84b6dd17f29..e0c65b0e09774 100644 --- a/mlir/lib/Transforms/RemoveDeadValues.cpp +++ b/mlir/lib/Transforms/RemoveDeadValues.cpp @@ -88,6 +88,8 @@ struct FunctionToCleanUp { struct OperationToCleanup { Operation *op; BitVector nonLive; + Operation *callee = + nullptr; // Optional: For CallOpInterface ops, stores the callee function }; struct BlockArgsToCleanup { @@ -287,7 +289,8 @@ static void processSimpleOp(Operation *op, RunLivenessAnalysis &la, static void processFuncOp(FunctionOpInterface funcOp, Operation *module, RunLivenessAnalysis &la, DenseSet &nonLiveSet, RDVFinalCleanupList &cl) { - LDBG() << "Processing function op: " << funcOp.getOperation()->getName(); + LDBG() << "Processing function op: " + << OpWithFlags(funcOp, OpPrintingFlags().skipRegions()); if (funcOp.isPublic() || funcOp.isExternal()) { LDBG() << "Function is public or external, skipping: " << funcOp.getOperation()->getName(); @@ -306,19 +309,19 @@ static void processFuncOp(FunctionOpInterface funcOp, Operation *module, nonLiveSet.insert(arg); } - // Do (2). + // Do (2). (Skip creating generic operand cleanup entries for call ops. + // Call arguments will be removed in the call-site specific segment-aware + // cleanup, avoiding generic eraseOperands bitvector mechanics.) SymbolTable::UseRange uses = *funcOp.getSymbolUses(module); for (SymbolTable::SymbolUse use : uses) { Operation *callOp = use.getUser(); assert(isa(callOp) && "expected a call-like user"); - // The number of operands in the call op may not match the number of - // arguments in the func op. 
- BitVector nonLiveCallOperands(callOp->getNumOperands(), false); - SmallVector callOpOperands = - operandsToOpOperands(cast(callOp).getArgOperands()); - for (int index : nonLiveArgs.set_bits()) - nonLiveCallOperands.set(callOpOperands[index]->getOperandNumber()); - cl.operands.push_back({callOp, nonLiveCallOperands}); + // Push an empty operand cleanup entry so that call-site specific logic in + // cleanUpDeadVals runs (it keys off CallOpInterface). The BitVector is + // intentionally all false to avoid generic erasure. + // Store the funcOp as the callee to avoid expensive symbol lookup later. + cl.operands.push_back({callOp, BitVector(callOp->getNumOperands(), false), + funcOp.getOperation()}); } // Do (3). @@ -746,6 +749,10 @@ static void cleanUpDeadVals(RDVFinalCleanupList &list) { // 3. Functions LDBG() << "Cleaning up " << list.functions.size() << " functions"; + // Record which function arguments were erased so we can shrink call-site + // argument segments for CallOpInterface operations (e.g. ops using + // AttrSizedOperandSegments) in the next phase. + DenseMap erasedFuncArgs; for (auto &f : list.functions) { LDBG() << "Cleaning up function: " << f.funcOp.getOperation()->getName(); LDBG() << " Erasing " << f.nonLiveArgs.count() << " non-live arguments"; @@ -754,17 +761,52 @@ static void cleanUpDeadVals(RDVFinalCleanupList &list) { // Some functions may not allow erasing arguments or results. These calls // return failure in such cases without modifying the function, so it's okay // to proceed. - (void)f.funcOp.eraseArguments(f.nonLiveArgs); + if (succeeded(f.funcOp.eraseArguments(f.nonLiveArgs))) { + // Record only if we actually erased something. + if (f.nonLiveArgs.any()) + erasedFuncArgs.try_emplace(f.funcOp.getOperation(), f.nonLiveArgs); + } (void)f.funcOp.eraseResults(f.nonLiveRets); } // 4. 
Operands LDBG() << "Cleaning up " << list.operands.size() << " operand lists"; for (OperationToCleanup &o : list.operands) { - if (o.op->getNumOperands() > 0) { - LDBG() << "Erasing " << o.nonLive.count() - << " non-live operands from operation: " - << OpWithFlags(o.op, OpPrintingFlags().skipRegions()); + // Handle call-specific cleanup only when we have a cached callee reference. + // This avoids expensive symbol lookup and is defensive against future + // changes. + bool handledAsCall = false; + if (o.callee && isa(o.op)) { + auto call = cast(o.op); + auto it = erasedFuncArgs.find(o.callee); + if (it != erasedFuncArgs.end()) { + const BitVector &deadArgIdxs = it->second; + MutableOperandRange args = call.getArgOperandsMutable(); + // First, erase the call arguments corresponding to erased callee + // args. We iterate backwards to preserve indices. + for (unsigned argIdx : llvm::reverse(deadArgIdxs.set_bits())) + args.erase(argIdx); + // If this operand cleanup entry also has a generic nonLive bitvector, + // clear bits for call arguments we already erased above to avoid + // double-erasing (which could impact other segments of ops with + // AttrSizedOperandSegments). + if (o.nonLive.any()) { + // Map the argument logical index to the operand number(s) recorded. 
+ int operandOffset = call.getArgOperands().getBeginOperandIndex(); + for (int argIdx : deadArgIdxs.set_bits()) { + int operandNumber = operandOffset + argIdx; + if (operandNumber < static_cast(o.nonLive.size())) + o.nonLive.reset(operandNumber); + } + } + handledAsCall = true; + } + } + // Perform generic operand erasure for: + // - Non-call operations + // - Call operations without cached callee (where handledAsCall is false) + // But skip call operations that were already handled via segment-aware path + if (!handledAsCall && o.nonLive.any()) { o.op->eraseOperands(o.nonLive); } } diff --git a/mlir/lib/Transforms/Utils/FoldUtils.cpp b/mlir/lib/Transforms/Utils/FoldUtils.cpp index 5e07509871ea2..68ad3acf295c8 100644 --- a/mlir/lib/Transforms/Utils/FoldUtils.cpp +++ b/mlir/lib/Transforms/Utils/FoldUtils.cpp @@ -16,6 +16,7 @@ #include "mlir/IR/Builders.h" #include "mlir/IR/Matchers.h" #include "mlir/IR/Operation.h" +#include "llvm/Support/DebugLog.h" using namespace mlir; @@ -67,7 +68,8 @@ static Operation *materializeConstant(Dialect *dialect, OpBuilder &builder, // OperationFolder //===----------------------------------------------------------------------===// -LogicalResult OperationFolder::tryToFold(Operation *op, bool *inPlaceUpdate) { +LogicalResult OperationFolder::tryToFold(Operation *op, bool *inPlaceUpdate, + int maxIterations) { if (inPlaceUpdate) *inPlaceUpdate = false; @@ -86,7 +88,7 @@ LogicalResult OperationFolder::tryToFold(Operation *op, bool *inPlaceUpdate) { // Try to fold the operation. SmallVector results; - if (failed(tryToFold(op, results))) + if (failed(tryToFold(op, results, maxIterations))) return failure(); // Check to see if the operation was just updated in place. @@ -224,10 +226,19 @@ bool OperationFolder::isFolderOwnedConstant(Operation *op) const { /// Tries to perform folding on the given `op`. If successful, populates /// `results` with the results of the folding. 
LogicalResult OperationFolder::tryToFold(Operation *op, - SmallVectorImpl &results) { + SmallVectorImpl &results, + int maxIterations) { SmallVector foldResults; - if (failed(op->fold(foldResults)) || - failed(processFoldResults(op, results, foldResults))) + if (failed(op->fold(foldResults))) + return failure(); + int count = 1; + do { + LDBG() << "Folded in place #" << count + << " times: " << OpWithFlags(op, OpPrintingFlags().skipRegions()); + } while (count++ < maxIterations && foldResults.empty() && + succeeded(op->fold(foldResults))); + + if (failed(processFoldResults(op, results, foldResults))) return failure(); return success(); } diff --git a/mlir/test/CAPI/llvm.c b/mlir/test/CAPI/llvm.c index 12a436ad12fc4..f5fbb4645cd5d 100644 --- a/mlir/test/CAPI/llvm.c +++ b/mlir/test/CAPI/llvm.c @@ -270,7 +270,7 @@ static void testDebugInfoAttributes(MlirContext ctx) { MlirAttribute compile_unit = mlirLLVMDICompileUnitAttrGet( ctx, id, LLVMDWARFSourceLanguageC99, file, foo, false, - MlirLLVMDIEmissionKindFull, MlirLLVMDINameTableKindDefault); + MlirLLVMDIEmissionKindFull, MlirLLVMDINameTableKindDefault, bar); // CHECK: #llvm.di_compile_unit<{{.*}}> mlirAttributeDump(compile_unit); diff --git a/mlir/test/Dialect/Arith/constant-fold.mlir b/mlir/test/Dialect/Arith/constant-fold.mlir new file mode 100644 index 0000000000000..172945fafdaf3 --- /dev/null +++ b/mlir/test/Dialect/Arith/constant-fold.mlir @@ -0,0 +1,18 @@ +// Test with the default (one application of the folder) and then with 2 iterations. +// RUN: mlir-opt %s --pass-pipeline="builtin.module(func.func(test-single-fold))" | FileCheck %s --check-prefixes=CHECK,CHECK-ONE +// RUN: mlir-opt %s --pass-pipeline="builtin.module(func.func(test-single-fold{max-iterations=2}))" | FileCheck %s --check-prefixes=CHECK,CHECK-TWO + + +// Folding entirely this requires to move the constant to the right +// before invoking the op-specific folder. +// With one iteration, we just push the constant to the right. 
+// With a second iteration, we actually fold the "add" (x+0->x) +// CHECK: func @recurse_fold_traits(%[[ARG0:.*]]: i32) +func.func @recurse_fold_traits(%arg0 : i32) -> i32 { + %cst0 = arith.constant 0 : i32 +// CHECK-ONE: %[[ADD:.*]] = arith.addi %[[ARG0]], + %res = arith.addi %cst0, %arg0 : i32 +// CHECK-ONE: return %[[ADD]] : i32 +// CHECK-TWO: return %[[ARG0]] : i32 + return %res : i32 +} diff --git a/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir b/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir index 8627fcd2576b9..152a392afe247 100644 --- a/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir +++ b/mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir @@ -26,6 +26,26 @@ module attributes {transform.with_named_sequence} { // ----- +// Memref semantics is not supported. +// Check that we emit an error. +func.func @negative_conv_memref(%arg0: memref<1x16x16x4xf32>, %arg1: memref<16x3x3x4xf32>, %arg2: memref<1x14x14x16xf32>) { + // expected-note@below {{when applied to this op}} + linalg.conv_2d_nhwc_fhwc {dilations = dense<1> : memref<2xi64>, strides = dense<1> : memref<2xi64> } + ins(%arg0, %arg1: memref<1x16x16x4xf32>, memref<16x3x3x4xf32>) outs(%arg2: memref<1x14x14x16xf32>) + return +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { + %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_fhwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op + // expected-error@below {{failed to apply}} + %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield + } +} + +// ----- + // Check that we get the proper handles for the img2col tensor producer // and the final instruction. 
@@ -267,6 +287,31 @@ module attributes {transform.with_named_sequence} { // ----- +// Check that the encoding on the filter (weights) tensor is propagated when applying the transform. + +// CHECK: func.func @batch_nchw_conv_with_filter_encoding(%[[INPUT:.+]]: tensor<8x4x16x16xf32>, %[[FILTER:.*]]: tensor<16x4x3x3xf32, 42 : i32>, %[[OUTPUT:.*]]: tensor<8x16x14x14xf32>) +// CHECK-DAG: %[[COLLAPSED_FILTER:.+]] = tensor.collapse_shape %[[FILTER]] + // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]] : tensor<16x4x3x3xf32, 42 : i32> into tensor<16x36xf32, 42 : i32> +// CHECK: %[[COL_TENSOR:.+]] = linalg.generic {{.*}} ins(%[[INPUT]] : tensor<8x4x16x16xf32>) +// CHECK: %[[MATMUL_RESULT:.+]] = linalg.generic {{.*}} ins(%[[COLLAPSED_FILTER]], %[[COL_TENSOR]] : tensor<16x36xf32, 42 : i32>, tensor<8x36x196xf32>) +func.func @batch_nchw_conv_with_filter_encoding(%arg0: tensor<8x4x16x16xf32>, %arg1: tensor<16x4x3x3xf32, 42 : i32>, %arg2: tensor<8x16x14x14xf32>) -> tensor<8x16x14x14xf32> { + %0 = linalg.conv_2d_nchw_fchw + {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> } + ins(%arg0, %arg1: tensor<8x4x16x16xf32>, tensor<16x4x3x3xf32, 42 : i32>) + outs(%arg2: tensor<8x16x14x14xf32>) -> tensor<8x16x14x14xf32> + return %0 : tensor<8x16x14x14xf32> +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { + %0 = transform.structured.match ops{["linalg.conv_2d_nchw_fchw"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield + } +} + +// ----- + // CHECK: IR printer: tensor_producer // CHECK-NEXT: %[[COL_TENSOR:.+]] = linalg.generic // CHECK-SAME: affine_map<(d0, d1, d2) -> (d0, d1 floordiv 14 + d2 floordiv 12, d1 mod 14 + (d2 mod 12) floordiv 4, d2 mod 4)> @@ -290,7 +335,7 @@ module attributes {transform.with_named_sequence} { // 
CHECK-DAG: %[[COLLAPSED_OUT:.+]] = tensor.collapse_shape %[[OUTPUT]] {{\[}}[0], [1, 2], [3]] : tensor<1x14x14x16xf32> into tensor<1x196x16xf32> // CHECK: %[[INIT_COL_TENSOR:.+]] = tensor.empty() : tensor<1x196x36xf32> // CHECK: %[[COL_TENSOR:.+]] = linalg.generic -// CHECK-SAME: [#[[MAP0]], #[[MAP1]]] +// CHECK-SAME: [#[[MAP0]], #[[MAP1]]], {{.*}} ins(%[[INPUT]] : tensor<1x16x16x4xf32>) outs(%[[INIT_COL_TENSOR]] : tensor<1x196x36xf32>) // CHECK: ^bb0(%[[OUT_DATA:.+]]: f32) // CHECK: linalg.yield %{{.+}} : f32 // CHECK: %[[MATMUL_RESULT:.+]] = linalg.generic @@ -327,6 +372,31 @@ module attributes {transform.with_named_sequence} { // ----- +// Check that the encoding on the filter (weights) tensor is propagated when applying the transform. + +// CHECK: func.func @conv_2d_nhwc_fhwc_with_filter_encoding(%[[INPUT:.+]]: tensor<1x16x16x4xf32>, %[[FILTER:.*]]: tensor<16x3x3x4xf32, 42 : i32>, %[[OUTPUT:.*]]: tensor<1x14x14x16xf32>) +// CHECK-DAG: %[[COLLAPSED_FILTER:.+]] = tensor.collapse_shape %[[FILTER]] + // CHECK-SAME{LITERAL}: [[0], [1, 2, 3]] : tensor<16x3x3x4xf32, 42 : i32> into tensor<16x36xf32, 42 : i32> +// CHECK: %[[COL_TENSOR:.+]] = linalg.generic {{.*}} ins(%[[INPUT]] : tensor<1x16x16x4xf32>) +// CHECK: %[[MATMUL_RESULT:.+]] = linalg.generic {{.*}} ins(%[[COL_TENSOR]], %[[COLLAPSED_FILTER]] : tensor<1x196x36xf32>, tensor<16x36xf32, 42 : i32>) +func.func @conv_2d_nhwc_fhwc_with_filter_encoding(%input: tensor<1x16x16x4xf32>, %filter: tensor<16x3x3x4xf32, 42 : i32>, %out: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32> { + %0 = linalg.conv_2d_nhwc_fhwc + { dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> } + ins(%input, %filter: tensor<1x16x16x4xf32>, tensor<16x3x3x4xf32, 42 : i32>) + outs(%out: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32> + return %0 : tensor<1x14x14x16xf32> +} + +module attributes {transform.with_named_sequence} { + transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) { + %0 = 
transform.structured.match ops{["linalg.conv_2d_nhwc_fhwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op) + transform.yield + } +} + +// ----- + // Check for signed extend when the input type is smaller than the accumulator type. // CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)> diff --git a/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir b/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir index 547c7355e00c6..b73bc69393dab 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-attr-interface.mlir @@ -7,10 +7,8 @@ gpu.module @test { //CHECK: [[IDY:%.+]] = affine.apply #map()[[[sgId]]] //CHECK: [[c32:%.+]] = arith.constant 32 : index //CHECK: [[LOCALY:%.+]] = index.mul [[IDY]], [[c32]] - //CHECK: [[c0:%.+]] = arith.constant 0 : index - //CHECK: [[Y:%.+]] = arith.addi [[LOCALY]], [[c0]] : index //CHECK: [[c128:%.+]] = arith.constant 128 : index - //CHECK: [[MODY:%.+]] = index.remu [[Y]], [[c128]] + //CHECK: [[MODY:%.+]] = index.remu [[LOCALY]], [[c128]] //CHECK: [[BASE:%.+]] = vector.step : vector<32xindex> //CHECK: [[CAST:%.+]] = vector.broadcast [[MODY]] : index to vector<32xindex> //CHECK: [[ADD:%.+]] = arith.addi [[BASE]], [[CAST]] : vector<32xindex> @@ -23,10 +21,8 @@ gpu.module @test { //CHECK: [[IDY:%.+]] = affine.apply #map()[[[sgId]]] //CHECK: [[c32:%.+]] = arith.constant 32 : index //CHECK: [[LOCALY:%.+]] = index.mul [[IDY]], [[c32]] - //CHECK: [[c0:%.+]] = arith.constant 0 : index - //CHECK: [[Y:%.+]] = arith.addi [[LOCALY]], [[c0]] : index //CHECK: [[c128:%.+]] = arith.constant 128 : index - //CHECK: [[MODY:%.+]] = index.remu [[Y]], [[c128]] + //CHECK: [[MODY:%.+]] = index.remu [[LOCALY]], [[c128]] //CHECK: [[BASE:%.+]] = vector.step : vector<32xindex> //CHECK: [[CAST:%.+]] = vector.broadcast [[MODY]] : index to vector<32xindex> //CHECK: [[ADD:%.+]] = arith.addi 
[[BASE]], [[CAST]] : vector<32xindex> diff --git a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir index e5cc65e6bd3d7..d2d250cbe0f66 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-rr.mlir @@ -27,12 +27,10 @@ gpu.module @test_round_robin_assignment { //CHECK: [[LX:%.+]] = index.mul [[IdX]], [[C64]] //CHECK: [[C0:%.+]] = arith.constant 0 : index //CHECK: [[C0_1:%.+]] = arith.constant 0 : index - //CHECK: [[ADDY:%.+]] = arith.addi [[LY]], [[C0]] : index - //CHECK: [[ADDX:%.+]] = arith.addi [[LX]], [[C0_1]] : index //CHECK: [[C128:%.+]] = arith.constant 128 : index - //CHECK: [[offY:%.+]] = index.remu [[ADDY]], [[C128]] + //CHECK: [[offY:%.+]] = index.remu [[LY]], [[C128]] //CHECK: [[C64_2:%.+]] = arith.constant 64 : index - //CHECK: [[offX:%.+]] = index.remu [[ADDX]], [[C64_2]] + //CHECK: [[offX:%.+]] = index.remu [[LX]], [[C64_2]] //CHECK: xegpu.create_nd_tdesc [[ARG_0]][[[offY]], [[offX]]] : memref<256x128xf32> -> !xegpu.tensor_desc<16x64xf32> %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<256x128xf32> -> !xegpu.tensor_desc<128x64xf32, #xegpu.layout> diff --git a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir index 48fc633974e63..03c63861705d9 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg-unify-ops.mlir @@ -325,12 +325,10 @@ gpu.module @test_distribution { //CHECK: [[l_off_x:%.+]] = index.mul [[id_x]], [[c32_1]] //CHECK: [[c0:%.+]] = arith.constant 0 : index //CHECK: [[c0_1:%.+]] = arith.constant 0 : index - //CHECK: [[l_off_y_0:%.+]] = arith.addi [[l_off_y]], [[c0]] : index - //CHECK: [[l_off_x_0:%.+]] = arith.addi [[l_off_x]], [[c0_1]] : index //CHECK: [[c64:%.+]] = arith.constant 64 : index - //CHECK: [[off_y:%.+]] = index.remu [[l_off_y_0]], [[c64]] + //CHECK: [[off_y:%.+]] = index.remu [[l_off_y]], [[c64]] //CHECK: 
[[c128:%.+]] = arith.constant 128 : index - //CHECK: [[off_x:%.+]] = index.remu [[l_off_x_0]], [[c128]] + //CHECK: [[off_x:%.+]] = index.remu [[l_off_x]], [[c128]] //CHECK: xegpu.load_matrix [[mdesc]][[[off_y]], [[off_x]]] <{layout = #xegpu.layout}>: !xegpu.mem_desc<64x128xf32>, index, index -> vector<32x32xf32> %0 = xegpu.create_mem_desc %arg0 : memref<32768xi8, 3> -> !xegpu.mem_desc<64x128xf32> %1 = xegpu.load_matrix %0[0, 0] <{layout = #xegpu.layout}>: !xegpu.mem_desc<64x128xf32> -> vector<64x128xf32> @@ -349,13 +347,11 @@ gpu.module @test_distribution { //CHECK: [[id_y:%.+]] = affine.apply #map()[[[sgid]]] //CHECK: [[id_x:%.+]] = affine.apply #map1()[[[sgid]]] //CHECK: [[c32:%.+]] = arith.constant 32 : index - //CHECK: [[l_off_y_0:%.+]] = index.mul [[id_y]], [[c32]] + //CHECK: [[l_off_y:%.+]] = index.mul [[id_y]], [[c32]] //CHECK: [[c32_1:%.+]] = arith.constant 32 : index - //CHECK: [[l_off_x_0:%.+]] = index.mul [[id_x]], [[c32_1]] + //CHECK: [[l_off_x:%.+]] = index.mul [[id_x]], [[c32_1]] //CHECK: [[c0:%.+]] = arith.constant 0 : index //CHECK: [[c0_2:%.+]] = arith.constant 0 : index - //CHECK: [[l_off_y:%.+]] = arith.addi [[l_off_y_0]], [[c0]] : index - //CHECK: [[l_off_x:%.+]] = arith.addi [[l_off_x_0]], [[c0_2]] : index //CHECK: [[c64:%.+]] = arith.constant 64 : index //CHECK: [[off_y:%.+]] = index.remu [[l_off_y]], [[c64]] //CHECK: [[c128:%.+]] = arith.constant 128 : index @@ -412,11 +408,10 @@ gpu.module @test_distribution { //CHECK: [[sgId:%.+]] = gpu.subgroup_id : index //CHECK-DAG: [[IDY:%.+]] = affine.apply #map2()[[[sgId]]] //CHECK-DAG: [[c32:%.+]] = arith.constant 32 : index - //CHECK-DAG: [[LOCALY:%.+]] = index.mul [[IDY]], [[c32]] + //CHECK-DAG: [[LY:%.+]] = index.mul [[IDY]], [[c32]] //CHECK-DAG: [[c0:%.+]] = arith.constant 0 : index - //CHECK-DAG: [[Y:%.+]] = arith.addi [[LOCALY]], [[c0]] : index //CHECK-DAG: [[c128:%.+]] = arith.constant 128 : index - //CHECK-DAG: [[MODY:%.+]] = index.remu [[Y]], [[c128]] + //CHECK-DAG: [[MODY:%.+]] = index.remu 
[[LY]], [[c128]] //CHECK-DAG: [[BASE:%.+]] = vector.step : vector<32xindex> //CHECK-DAG: [[CAST:%.+]] = vector.broadcast [[MODY]] : index to vector<32xindex> //CHECK: [[ADD:%.+]] = arith.addi [[BASE]], [[CAST]] : vector<32xindex> @@ -430,9 +425,8 @@ gpu.module @test_distribution { //CHECK-DAG: [[c8:%.+]] = arith.constant 8 : index //CHECK-DAG: [[LOCALY:%.+]] = index.mul [[sgId]], [[c8]] //CHECK-DAG: [[c0:%.+]] = arith.constant 0 : index - //CHECK-DAG: [[Y:%.+]] = arith.addi [[LOCALY]], [[c0]] : index //CHECK-DAG: [[c128:%.+]] = arith.constant 128 : index - //CHECK-DAG: [[MODY:%.+]] = index.remu [[Y]], [[c128]] + //CHECK-DAG: [[MODY:%.+]] = index.remu [[LOCALY]], [[c128]] //CHECK-DAG: [[BASE:%.+]] = vector.step : vector<8xindex> //CHECK-DAG: [[CAST:%.+]] = vector.broadcast [[MODY]] : index to vector<8xindex> //CHECK: [[ADD:%.+]] = arith.addi [[BASE]], [[CAST]] : vector<8xindex> diff --git a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir index c0fb373835e3d..e83229e3a3995 100644 --- a/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir +++ b/mlir/test/Dialect/XeGPU/xegpu-wg-to-sg.mlir @@ -14,12 +14,10 @@ gpu.module @test_1_1_assignment { //CHECK: [[LX:%.+]] = index.mul [[SGIDX]], [[C32]] //CHECK: [[C0:%.+]] = arith.constant 0 : index //CHECK: [[C0_1:%.+]] = arith.constant 0 : index - //CHECK: [[UY:%.+]] = arith.addi [[LY]], [[C0]] : index - //CHECK: [[UX:%.+]] = arith.addi [[LX]], [[C0_1]] : index //CHECK: [[C256:%.+]] = arith.constant 256 : index - //CHECK: [[Y:%.+]] = index.remu [[UY]], [[C256]] + //CHECK: [[Y:%.+]] = index.remu [[LY]], [[C256]] //CHECK: [[C128:%.+]] = arith.constant 128 : index - //CHECK: [[X:%.+]] = index.remu [[UX]], [[C128]] + //CHECK: [[X:%.+]] = index.remu [[LX]], [[C128]] //CHECK: [[TDESC:%.+]] = xegpu.create_nd_tdesc [[ARG_0]][[[Y]], [[X]]] : memref<256x128xf32> -> !xegpu.tensor_desc<32x32xf32, #xegpu.layout> %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<256x128xf32> -> !xegpu.tensor_desc<256x128xf32, 
#xegpu.layout> @@ -37,17 +35,13 @@ gpu.module @test_1_1_assignment { //CHECK: [[LX:%.+]] = index.mul [[SGIDX]], [[C32]] //CHECK: [[C0:%.+]] = arith.constant 0 : index //CHECK: [[C0_2:%.+]] = arith.constant 0 : index - //CHECK: [[UY:%.+]] = arith.addi [[LY]], [[C0]] : index - //CHECK: [[UX:%.+]] = arith.addi [[LX]], [[C0_2]] : index //CHECK: [[C256:%.+]] = arith.constant 256 : index - //CHECK: [[MODY:%.+]] = index.remu [[UY]], [[C256]] + //CHECK: [[MODY:%.+]] = index.remu [[LY]], [[C256]] //CHECK: [[C128:%.+]] = arith.constant 128 : index - //CHECK: [[MODX:%.+]] = index.remu [[UX]], [[C128]] + //CHECK: [[MODX:%.+]] = index.remu [[LX]], [[C128]] //CHECK: [[C0_3:%.+]] = arith.constant 0 : index - //CHECK: [[Y:%.+]] = index.add [[MODY]], [[C0_3]] //CHECK: [[C0_4:%.+]] = arith.constant 0 : index - //CHECK: [[X:%.+]] = index.add [[MODX]], [[C0_4]] - //CHECK: [[TDESC:%.+]] = xegpu.create_nd_tdesc [[ARG_0]][1, [[Y]], [[X]]] : memref<3x256x128xf32> -> !xegpu.tensor_desc<32x32xf32, #xegpu.layout> + //CHECK: [[TDESC:%.+]] = xegpu.create_nd_tdesc [[ARG_0]][1, [[MODY]], [[MODX]]] : memref<3x256x128xf32> -> !xegpu.tensor_desc<32x32xf32, #xegpu.layout> %tdesc = xegpu.create_nd_tdesc %src[1, 0, 0] : memref<3x256x128xf32> -> !xegpu.tensor_desc<256x128xf32, #xegpu.layout> gpu.return diff --git a/mlir/test/Target/LLVMIR/Import/debug-info.ll b/mlir/test/Target/LLVMIR/Import/debug-info.ll index 9e2a17fb436af..e056e43a0982c 100644 --- a/mlir/test/Target/LLVMIR/Import/debug-info.ll +++ b/mlir/test/Target/LLVMIR/Import/debug-info.ll @@ -215,7 +215,7 @@ define void @composite_type() !dbg !3 { ; // ----- ; CHECK-DAG: #[[FILE:.+]] = #llvm.di_file<"debug-info.ll" in "/"> -; CHECK-DAG: #[[CU:.+]] = #llvm.di_compile_unit, sourceLanguage = DW_LANG_C, file = #[[FILE]], isOptimized = false, emissionKind = None, nameTableKind = None> +; CHECK-DAG: #[[CU:.+]] = #llvm.di_compile_unit, sourceLanguage = DW_LANG_C, file = #[[FILE]], isOptimized = false, emissionKind = None, nameTableKind = None, 
splitDebugFilename = "test.dwo"> ; Verify an empty subroutine types list is supported. ; CHECK-DAG: #[[SP_TYPE:.+]] = #llvm.di_subroutine_type ; CHECK-DAG: #[[SP:.+]] = #llvm.di_subprogram, compileUnit = #[[CU]], scope = #[[FILE]], name = "subprogram", linkageName = "subprogram", file = #[[FILE]], line = 42, scopeLine = 42, subprogramFlags = Definition, type = #[[SP_TYPE]]> @@ -227,7 +227,7 @@ define void @subprogram() !dbg !3 { !llvm.dbg.cu = !{!1} !llvm.module.flags = !{!0} !0 = !{i32 2, !"Debug Info Version", i32 3} -!1 = distinct !DICompileUnit(language: DW_LANG_C, file: !2, nameTableKind: None) +!1 = distinct !DICompileUnit(language: DW_LANG_C, file: !2, nameTableKind: None, splitDebugFilename: "test.dwo") !2 = !DIFile(filename: "debug-info.ll", directory: "/") !3 = distinct !DISubprogram(name: "subprogram", linkageName: "subprogram", scope: !2, file: !2, line: 42, scopeLine: 42, spFlags: DISPFlagDefinition, unit: !1, type: !4) !4 = !DISubroutineType(cc: DW_CC_normal, types: !5) diff --git a/mlir/test/Target/LLVMIR/llvmir-debug.mlir b/mlir/test/Target/LLVMIR/llvmir-debug.mlir index 274d64af78283..38ae63d1908e9 100644 --- a/mlir/test/Target/LLVMIR/llvmir-debug.mlir +++ b/mlir/test/Target/LLVMIR/llvmir-debug.mlir @@ -43,7 +43,7 @@ llvm.func @func_no_debug() { #cu = #llvm.di_compile_unit< id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #file, producer = "MLIR", isOptimized = true, emissionKind = Full, - nameTableKind = None + nameTableKind = None, splitDebugFilename = "test.dwo" > #composite = #llvm.di_composite_type< tag = DW_TAG_structure_type, name = "composite", file = #file, @@ -140,7 +140,7 @@ llvm.func @empty_types() { llvm.return } loc(fused<#sp1>["foo.mlir":2:1]) -// CHECK: ![[CU_LOC:.*]] = distinct !DICompileUnit(language: DW_LANG_C, file: ![[CU_FILE_LOC:.*]], producer: "MLIR", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, nameTableKind: None) +// CHECK: ![[CU_LOC:.*]] = distinct !DICompileUnit(language: DW_LANG_C, file: 
![[CU_FILE_LOC:.*]], producer: "MLIR", isOptimized: true, runtimeVersion: 0, splitDebugFilename: "test.dwo", emissionKind: FullDebug, nameTableKind: None) // CHECK: ![[CU_FILE_LOC]] = !DIFile(filename: "foo.mlir", directory: "/test/") // CHECK: ![[FUNC_LOC]] = distinct !DISubprogram(name: "func_with_debug", linkageName: "func_with_debug", scope: ![[NESTED_NAMESPACE:.*]], file: ![[CU_FILE_LOC]], line: 3, type: ![[FUNC_TYPE:.*]], scopeLine: 3, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: ![[CU_LOC]]) diff --git a/mlir/test/Transforms/remove-dead-values-call-segments.mlir b/mlir/test/Transforms/remove-dead-values-call-segments.mlir new file mode 100644 index 0000000000000..fed9cabbd2ee8 --- /dev/null +++ b/mlir/test/Transforms/remove-dead-values-call-segments.mlir @@ -0,0 +1,23 @@ +// RUN: mlir-opt --split-input-file --remove-dead-values --mlir-print-op-generic %s | FileCheck %s --check-prefix=GEN + +// ----- +// Private callee: both args become dead after internal DCE; RDV drops callee +// args and shrinks the *args* segment on the call-site to zero; sizes kept in +// sync. + +module { + func.func private @callee(%x: i32, %y: i32) { + %u = arith.addi %x, %x : i32 // %y is dead + return + } + + func.func @caller(%a: i32, %b: i32) { + // args segment initially has 2 operands. 
+ "test.call_with_segments"(%a, %b) { callee = @callee, + operandSegmentSizes = array } : (i32, i32) -> () + return + } +} + +// GEN: "test.call_with_segments"() <{callee = @callee, operandSegmentSizes = array}> : () -> () +// ^ args shrank from 2 -> 0 diff --git a/mlir/test/Transforms/remove-dead-values.mlir b/mlir/test/Transforms/remove-dead-values.mlir index fa2c145bd3701..56449469dc29f 100644 --- a/mlir/test/Transforms/remove-dead-values.mlir +++ b/mlir/test/Transforms/remove-dead-values.mlir @@ -615,3 +615,37 @@ module @last_block_not_exit { // CHECK-LABEL: @call_private_but_not_use // CHECK: call @terminated_with_condbr(%false, %true) : (i1, i1) } + +// ----- + +// Test the elimination of function arguments. + +// CHECK-LABEL: func private @single_parameter +// CHECK-SAME: () { +func.func private @single_parameter(%arg0: index) { + return +} + +// CHECK-LABEL: func.func private @mutl_parameter( +// CHECK-SAME: %[[ARG0:.*]]: index) +// CHECK: return %[[ARG0]] +func.func private @mutl_parameter(%arg0: index, %arg1: index, %arg2: index) -> index { + return %arg1 : index +} + +// CHECK-LABEL: func private @eliminate_parameter +// CHECK-SAME: () { +func.func private @eliminate_parameter(%arg0: index, %arg1: index) { + call @single_parameter(%arg0) : (index) -> () + return +} + +// CHECK-LABEL: func @callee +// CHECK-SAME: (%[[ARG0:.*]]: index, %[[ARG1:.*]]: index, %[[ARG2:.*]]: index) +func.func @callee(%arg0: index, %arg1: index, %arg2: index) -> index { +// CHECK: call @eliminate_parameter() : () -> () + call @eliminate_parameter(%arg0, %arg1) : (index, index) -> () +// CHECK: call @mutl_parameter(%[[ARG1]]) : (index) -> index + %res = call @mutl_parameter(%arg0, %arg1, %arg2) : (index, index, index) -> (index) + return %res : index +} diff --git a/mlir/test/lib/Dialect/Test/TestDialect.cpp b/mlir/test/lib/Dialect/Test/TestDialect.cpp index 987e8f3654ce8..21d75f58b0a3a 100644 --- a/mlir/test/lib/Dialect/Test/TestDialect.cpp +++ 
b/mlir/test/lib/Dialect/Test/TestDialect.cpp @@ -431,3 +431,47 @@ void TestDialect::getCanonicalizationPatterns( RewritePatternSet &results) const { results.add(&dialectCanonicalizationPattern); } + +//===----------------------------------------------------------------------===// +// TestCallWithSegmentsOp +//===----------------------------------------------------------------------===// +// The op `test.call_with_segments` models a call-like operation whose operands +// are divided into 3 variadic segments: `prefix`, `args`, and `suffix`. +// Only the middle segment represents the actual call arguments. The op uses +// the AttrSizedOperandSegments trait, so we can derive segment boundaries from +// the generated `operandSegmentSizes` attribute. We provide custom helpers to +// expose the logical call arguments as both a read-only range and a mutable +// range bound to the proper segment so that insertion/erasure updates the +// attribute automatically. + +// Segment layout indices in the DenseI32ArrayAttr: [prefix, args, suffix]. +static constexpr unsigned kTestCallWithSegmentsArgsSegIndex = 1; + +Operation::operand_range CallWithSegmentsOp::getArgOperands() { + // Leverage generated getters for segment sizes: slice between prefix and + // suffix using current operand list. + return getOperation()->getOperands().slice(getPrefix().size(), + getArgs().size()); +} + +MutableOperandRange CallWithSegmentsOp::getArgOperandsMutable() { + Operation *op = getOperation(); + + // Obtain the canonical segment size attribute name for this op. + auto segName = + CallWithSegmentsOp::getOperandSegmentSizesAttrName(op->getName()); + auto sizesAttr = op->getAttrOfType(segName); + assert(sizesAttr && "missing operandSegmentSizes attribute on op"); + + // Compute the start and length of the args segment from the prefix size and + // args size stored in the attribute. 
+ auto sizes = sizesAttr.asArrayRef(); + unsigned start = static_cast(sizes[0]); // prefix size + unsigned len = static_cast(sizes[1]); // args size + + NamedAttribute segNamed(segName, sizesAttr); + MutableOperandRange::OperandSegment binding{kTestCallWithSegmentsArgsSegIndex, + segNamed}; + + return MutableOperandRange(op, start, len, {binding}); +} diff --git a/mlir/test/lib/Dialect/Test/TestOps.td b/mlir/test/lib/Dialect/Test/TestOps.td index d9bbb3261febc..6ea27187655ee 100644 --- a/mlir/test/lib/Dialect/Test/TestOps.td +++ b/mlir/test/lib/Dialect/Test/TestOps.td @@ -3746,4 +3746,47 @@ def TestOpWithSuccessorRef : TEST_Op<"dummy_op_with_successor_ref"> { }]; } +def CallWithSegmentsOp : TEST_Op<"call_with_segments", + [AttrSizedOperandSegments, + DeclareOpInterfaceMethods]> { + let summary = "test call op with segmented args"; + let arguments = (ins + FlatSymbolRefAttr:$callee, + Variadic:$prefix, // non-arg segment (e.g., 'in') + Variadic:$args, // <-- the call *arguments* segment + Variadic:$suffix // non-arg segment (e.g., 'out') + ); + let results = (outs); + let assemblyFormat = [{ + $callee `(` $prefix `:` type($prefix) `)` + `(` $args `:` type($args) `)` + `(` $suffix `:` type($suffix) `)` attr-dict + }]; + + // Provide stub implementations for the ArgAndResultAttrsOpInterface. 
+ let extraClassDeclaration = [{ + ::mlir::ArrayAttr getArgAttrsAttr() { return {}; } + ::mlir::ArrayAttr getResAttrsAttr() { return {}; } + void setArgAttrsAttr(::mlir::ArrayAttr) {} + void setResAttrsAttr(::mlir::ArrayAttr) {} + ::mlir::Attribute removeArgAttrsAttr() { return {}; } + ::mlir::Attribute removeResAttrsAttr() { return {}; } + }]; + + let extraClassDefinition = [{ + ::mlir::CallInterfaceCallable $cppClass::getCallableForCallee() { + if (auto sym = (*this)->getAttrOfType<::mlir::SymbolRefAttr>("callee")) + return ::mlir::CallInterfaceCallable(sym); + return ::mlir::CallInterfaceCallable(); + } + void $cppClass::setCalleeFromCallable(::mlir::CallInterfaceCallable callee) { + if (auto sym = callee.dyn_cast<::mlir::SymbolRefAttr>()) + (*this)->setAttr("callee", sym); + else + (*this)->removeAttr("callee"); + } + }]; +} + + #endif // TEST_OPS diff --git a/mlir/test/lib/Transforms/TestSingleFold.cpp b/mlir/test/lib/Transforms/TestSingleFold.cpp index 5bd9dd2a1f075..e55f36aea0a7c 100644 --- a/mlir/test/lib/Transforms/TestSingleFold.cpp +++ b/mlir/test/lib/Transforms/TestSingleFold.cpp @@ -26,6 +26,9 @@ struct TestSingleFold : public PassWrapper>, public RewriterBase::Listener { MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestSingleFold) + TestSingleFold() = default; + TestSingleFold(const TestSingleFold &pass) : PassWrapper(pass) {} + StringRef getArgument() const final { return "test-single-fold"; } StringRef getDescription() const final { return "Test single-pass operation folding and dead constant elimination"; @@ -45,13 +48,18 @@ struct TestSingleFold : public PassWrapper>, if (it != existingConstants.end()) existingConstants.erase(it); } + + Option maxIterations{*this, "max-iterations", + llvm::cl::desc("Max iterations in the tryToFold"), + llvm::cl::init(1)}; }; } // namespace void TestSingleFold::foldOperation(Operation *op, OperationFolder &helper) { // Attempt to fold the specified operation, including handling unused or // duplicated constants. 
- (void)helper.tryToFold(op); + bool inPlaceUpdate = false; + (void)helper.tryToFold(op, &inPlaceUpdate, maxIterations); } void TestSingleFold::runOnOperation() { diff --git a/mlir/test/mlir-tblgen/attr-duplicated-builder-error.td b/mlir/test/mlir-tblgen/attr-duplicated-builder-error.td new file mode 100644 index 0000000000000..5f1c61a3a505d --- /dev/null +++ b/mlir/test/mlir-tblgen/attr-duplicated-builder-error.td @@ -0,0 +1,48 @@ +// RUN: not mlir-tblgen -gen-attrdef-decls -I %S/../../include %s 2>&1 | FileCheck %s + +include "mlir/IR/OpBase.td" + +def Test_Dialect : Dialect { + let name = "test"; + let cppNamespace = "::test"; +} + +class TestAttr traits = []> + : AttrDef { + let mnemonic = attrMnemonic; +} + +def TestAttr : TestAttr<"Test", "test"> { + let summary = "Test attrubute"; + let description = "Test attribute"; + + let parameters = (ins AttrParameter<"std::int64_t", "arg">:$arg); + let builders = [AttrBuilder<(ins "std::int64_t":$arg), [{ + return $_get($_ctxt, arg); + }]>]; + + let assemblyFormat = "`<` $arg `>`"; + + let skipDefaultBuilders = 0; + let genVerifyDecl = 1; + let genMnemonicAlias = 1; +} + +def Test_TestAttrOp : Op { + let summary = "test operation with attribute"; + let description = "test operation with attribute"; + + let arguments = (ins TestAttr:$testAttr); + let assemblyFormat = "$testAttr attr-dict"; +} + +// CHECK: attr-duplicated-builder-error.td:20:7: error: builder `get` conflicts with an existing builder. +// CHECK-NEXT: let builders = [AttrBuilder<(ins "std::int64_t":$arg), [{ +// CHECK-NEXT: ^ +// CHECK-NEXT: note: A new builder with signature: +// CHECK-NEXT: static TestAttr get(::mlir::MLIRContext *context, std::int64_t arg); +// CHECK-EMPTY: +// CHECK-NEXT: is shadowed by an existing builder with signature: +// CHECK-NEXT: static TestAttr get(::mlir::MLIRContext *context, std::int64_t arg); +// CHECK-EMPTY: +// CHECK-NEXT: Please remove one of the conflicting definitions. 
diff --git a/mlir/test/mlir-tblgen/attr-duplicated-custom-builders-error.td b/mlir/test/mlir-tblgen/attr-duplicated-custom-builders-error.td new file mode 100644 index 0000000000000..0e09f667c1ccd --- /dev/null +++ b/mlir/test/mlir-tblgen/attr-duplicated-custom-builders-error.td @@ -0,0 +1,52 @@ +// RUN: not mlir-tblgen -gen-attrdef-decls -I %S/../../include %s 2>&1 | FileCheck %s + +include "mlir/IR/OpBase.td" + +def Test_Dialect : Dialect { + let name = "test"; + let cppNamespace = "::test"; +} + +class TestAttr traits = []> + : AttrDef { + let mnemonic = attrMnemonic; +} + +def TestAttr : TestAttr<"Test", "test"> { + let summary = "Test attrubute"; + let description = "Test attribute"; + + let parameters = (ins AttrParameter<"std::int64_t", "arg">:$arg); + let builders = [AttrBuilder<(ins "std::int64_t":$arg), [{ + return $_get($_ctxt, arg); + }]>, + AttrBuilder<(ins "std::int64_t":$arg), [{ + // Duplicated builder + return $_get($_ctxt, arg); + }]>]; + + let assemblyFormat = "`<` $arg `>`"; + + let skipDefaultBuilders = 1; + let genVerifyDecl = 1; + let genMnemonicAlias = 1; +} + +def Test_TestAttrOp : Op { + let summary = "test operation with attribute"; + let description = "test operation with attribute"; + + let arguments = (ins TestAttr:$testAttr); + let assemblyFormat = "$testAttr attr-dict"; +} + +// CHECK: attr-duplicated-custom-builders-error.td:20:7: error: builder `get` conflicts with an existing builder. +// CHECK-NEXT: let builders = [AttrBuilder<(ins "std::int64_t":$arg), [{ +// CHECK-NEXT: ^ +// CHECK-NEXT: note: A new builder with signature: +// CHECK-NEXT: static TestAttr get(::mlir::MLIRContext *context, std::int64_t arg); +// CHECK-EMPTY: +// CHECK-NEXT: is shadowed by an existing builder with signature: +// CHECK-NEXT: static TestAttr get(::mlir::MLIRContext *context, std::int64_t arg); +// CHECK-EMPTY: +// CHECK-NEXT: Please remove one of the conflicting definitions. 
diff --git a/mlir/test/python/ir/builtin_types.py b/mlir/test/python/ir/builtin_types.py index b42bfd9bc6587..54863253fc770 100644 --- a/mlir/test/python/ir/builtin_types.py +++ b/mlir/test/python/ir/builtin_types.py @@ -371,11 +371,16 @@ def testAbstractShapedType(): # CHECK-LABEL: TEST: testVectorType @run def testVectorType(): + shape = [2, 3] + with Context(): + f32 = F32Type.get() + # CHECK: unchecked vector type: vector<2x3xf32> + print("unchecked vector type:", VectorType.get_unchecked(shape, f32)) + with Context(), Location.unknown(): f32 = F32Type.get() - shape = [2, 3] - # CHECK: vector type: vector<2x3xf32> - print("vector type:", VectorType.get(shape, f32)) + # CHECK: checked vector type: vector<2x3xf32> + print("checked vector type:", VectorType.get(shape, f32)) none = NoneType.get() try: diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp index 3140f12c0b7e8..b9115657d6bf3 100644 --- a/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp +++ b/mlir/tools/mlir-tblgen/AttrOrTypeDefGen.cpp @@ -513,14 +513,57 @@ getCustomBuilderParams(std::initializer_list prefix, return builderParams; } +static std::string getSignature(const Method &m) { + std::string signature; + llvm::raw_string_ostream os(signature); + raw_indented_ostream indentedOs(os); + m.writeDeclTo(indentedOs); + return signature; +} + +static void emitDuplicatedBuilderError(const Method ¤tMethod, + StringRef methodName, + const Class &defCls, + const AttrOrTypeDef &def) { + + // Try to search for method that makes `get` redundant. + auto loc = def.getDef()->getFieldLoc("builders"); + for (auto &method : defCls.getMethods()) { + if (method->getName() == methodName && + method->makesRedundant(currentMethod)) { + PrintError(loc, llvm::Twine("builder `") + methodName + + "` conflicts with an existing builder. 
"); + PrintFatalNote(llvm::Twine("A new builder with signature:\n") + + getSignature(currentMethod) + + "\nis shadowed by an existing builder with signature:\n" + + getSignature(*method) + + "\nPlease remove one of the conflicting " + "definitions."); + } + } + + // This code shouldn't be reached, but leaving this here for potential future + // use. + PrintFatalError(loc, "Failed to generate builder " + methodName); +} + void DefGen::emitCustomBuilder(const AttrOrTypeBuilder &builder) { // Don't emit a body if there isn't one. auto props = builder.getBody() ? Method::Static : Method::StaticDeclaration; StringRef returnType = def.getCppClassName(); if (std::optional builderReturnType = builder.getReturnType()) returnType = *builderReturnType; - Method *m = defCls.addMethod(returnType, "get", props, - getCustomBuilderParams({}, builder)); + + llvm::StringRef methodName = "get"; + const auto parameters = getCustomBuilderParams({}, builder); + Method *m = defCls.addMethod(returnType, methodName, props, parameters); + + // If method is pruned, report error and terminate. + if (!m) { + auto curMethod = Method(returnType, methodName, props, parameters); + emitDuplicatedBuilderError(curMethod, methodName, defCls, def); + } + if (!builder.getBody()) return; @@ -547,11 +590,19 @@ void DefGen::emitCheckedCustomBuilder(const AttrOrTypeBuilder &builder) { StringRef returnType = def.getCppClassName(); if (std::optional builderReturnType = builder.getReturnType()) returnType = *builderReturnType; - Method *m = defCls.addMethod( - returnType, "getChecked", props, - getCustomBuilderParams( - {{"::llvm::function_ref<::mlir::InFlightDiagnostic()>", "emitError"}}, - builder)); + + llvm::StringRef methodName = "getChecked"; + auto parameters = getCustomBuilderParams( + {{"::llvm::function_ref<::mlir::InFlightDiagnostic()>", "emitError"}}, + builder); + Method *m = defCls.addMethod(returnType, methodName, props, parameters); + + // If method is pruned, report error and terminate. 
+ if (!m) { + auto curMethod = Method(returnType, methodName, props, parameters); + emitDuplicatedBuilderError(curMethod, methodName, defCls, def); + } + if (!builder.getBody()) return; diff --git a/mlir/tools/mlir-tblgen/EnumsGen.cpp b/mlir/tools/mlir-tblgen/EnumsGen.cpp index d152763f7382e..d4d32f5885971 100644 --- a/mlir/tools/mlir-tblgen/EnumsGen.cpp +++ b/mlir/tools/mlir-tblgen/EnumsGen.cpp @@ -364,6 +364,9 @@ getAllBitsUnsetCase(llvm::ArrayRef cases) { // inline constexpr operator|( a, b); // inline constexpr operator&( a, b); // inline constexpr operator^( a, b); +// inline constexpr &operator|=( &a, b); +// inline constexpr &operator&=( &a, b); +// inline constexpr &operator^=( &a, b); // inline constexpr operator~( bits); // inline constexpr bool bitEnumContainsAll( bits, bit); // inline constexpr bool bitEnumContainsAny( bits, bit); @@ -385,6 +388,15 @@ inline constexpr {0} operator&({0} a, {0} b) {{ inline constexpr {0} operator^({0} a, {0} b) {{ return static_cast<{0}>(static_cast<{1}>(a) ^ static_cast<{1}>(b)); } +inline constexpr {0} &operator|=({0} &a, {0} b) {{ + return a = a | b; +} +inline constexpr {0} &operator&=({0} &a, {0} b) {{ + return a = a & b; +} +inline constexpr {0} &operator^=({0} &a, {0} b) {{ + return a = a ^ b; +} inline constexpr {0} operator~({0} bits) {{ // Ensure only bits that can be present in the enum are set return static_cast<{0}>(~static_cast<{1}>(bits) & static_cast<{1}>({2}u)); diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp index 4fdde76a613bb..7e8e559baf878 100644 --- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp +++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp @@ -3104,8 +3104,8 @@ void OpEmitter::genBuilder() { std::optional body = builder.getBody(); auto properties = body ? 
Method::Static : Method::StaticDeclaration; auto *method = opClass.addMethod("void", "build", properties, arguments); - if (body) - ERROR_IF_PRUNED(method, "build", op); + + ERROR_IF_PRUNED(method, "build", op); if (method) method->setDeprecated(builder.getDeprecatedMessage()); diff --git a/offload/plugins-nextgen/amdgpu/src/rtl.cpp b/offload/plugins-nextgen/amdgpu/src/rtl.cpp index 64470e9fabf46..7b834ee346e5d 100644 --- a/offload/plugins-nextgen/amdgpu/src/rtl.cpp +++ b/offload/plugins-nextgen/amdgpu/src/rtl.cpp @@ -423,7 +423,11 @@ struct AMDGPUMemoryManagerTy : public DeviceAllocatorTy { assert(MemoryManager && "Invalid memory manager"); assert(PtrStorage && "Invalid pointer storage"); - *PtrStorage = MemoryManager->allocate(Size, nullptr); + auto PtrStorageOrErr = MemoryManager->allocate(Size, nullptr); + if (!PtrStorageOrErr) + return PtrStorageOrErr.takeError(); + + *PtrStorage = *PtrStorageOrErr; if (Size && *PtrStorage == nullptr) return Plugin::error(ErrorCode::OUT_OF_RESOURCES, "failure to allocate from AMDGPU memory manager"); @@ -443,15 +447,12 @@ struct AMDGPUMemoryManagerTy : public DeviceAllocatorTy { private: /// Allocation callback that will be called once the memory manager does not /// have more previously allocated buffers. - void *allocate(size_t Size, void *HstPtr, TargetAllocTy Kind) override; + Expected allocate(size_t Size, void *HstPtr, + TargetAllocTy Kind) override; /// Deallocation callback that will be called by the memory manager. - int free(void *TgtPtr, TargetAllocTy Kind) override { - if (auto Err = MemoryPool->deallocate(TgtPtr)) { - consumeError(std::move(Err)); - return OFFLOAD_FAIL; - } - return OFFLOAD_SUCCESS; + Error free(void *TgtPtr, TargetAllocTy Kind) override { + return MemoryPool->deallocate(TgtPtr); } /// The underlying plugin that owns this memory manager. @@ -2339,12 +2340,12 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy { } /// Allocate memory on the device or related to the device. 
- void *allocate(size_t Size, void *, TargetAllocTy Kind) override; + Expected allocate(size_t Size, void *, TargetAllocTy Kind) override; /// Deallocate memory on the device or related to the device. - int free(void *TgtPtr, TargetAllocTy Kind) override { + Error free(void *TgtPtr, TargetAllocTy Kind) override { if (TgtPtr == nullptr) - return OFFLOAD_SUCCESS; + return Plugin::success(); AMDGPUMemoryPoolTy *MemoryPool = nullptr; switch (Kind) { @@ -2360,17 +2361,14 @@ struct AMDGPUDeviceTy : public GenericDeviceTy, AMDGenericDeviceTy { break; } - if (!MemoryPool) { - REPORT("No memory pool for the specified allocation kind\n"); - return OFFLOAD_FAIL; - } + if (!MemoryPool) + return Plugin::error(ErrorCode::OUT_OF_RESOURCES, + "no memory pool for the specified allocation kind"); - if (Error Err = MemoryPool->deallocate(TgtPtr)) { - REPORT("%s\n", toString(std::move(Err)).data()); - return OFFLOAD_FAIL; - } + if (auto Err = MemoryPool->deallocate(TgtPtr)) + return Err; - return OFFLOAD_SUCCESS; + return Plugin::success(); } /// Synchronize current thread with the pending operations on the async info. @@ -3813,14 +3811,13 @@ static Error Plugin::check(int32_t Code, const char *ErrFmt, ArgsTy... Args) { return Plugin::error(OffloadErrCode, ErrFmt, Args..., Desc); } -void *AMDGPUMemoryManagerTy::allocate(size_t Size, void *HstPtr, - TargetAllocTy Kind) { +Expected AMDGPUMemoryManagerTy::allocate(size_t Size, void *HstPtr, + TargetAllocTy Kind) { // Allocate memory from the pool. void *Ptr = nullptr; - if (auto Err = MemoryPool->allocate(Size, &Ptr)) { - consumeError(std::move(Err)); - return nullptr; - } + if (auto Err = MemoryPool->allocate(Size, &Ptr)) + return std::move(Err); + assert(Ptr && "Invalid pointer"); // Get a list of agents that can access this memory pool. 
@@ -3830,14 +3827,13 @@ void *AMDGPUMemoryManagerTy::allocate(size_t Size, void *HstPtr, [&](hsa_agent_t Agent) { return MemoryPool->canAccess(Agent); }); // Allow all valid kernel agents to access the allocation. - if (auto Err = MemoryPool->enableAccess(Ptr, Size, Agents)) { - REPORT("%s\n", toString(std::move(Err)).data()); - return nullptr; - } + if (auto Err = MemoryPool->enableAccess(Ptr, Size, Agents)) + return std::move(Err); return Ptr; } -void *AMDGPUDeviceTy::allocate(size_t Size, void *, TargetAllocTy Kind) { +Expected AMDGPUDeviceTy::allocate(size_t Size, void *, + TargetAllocTy Kind) { if (Size == 0) return nullptr; @@ -3856,17 +3852,14 @@ void *AMDGPUDeviceTy::allocate(size_t Size, void *, TargetAllocTy Kind) { break; } - if (!MemoryPool) { - REPORT("No memory pool for the specified allocation kind\n"); - return nullptr; - } + if (!MemoryPool) + return Plugin::error(ErrorCode::UNSUPPORTED, + "no memory pool for the specified allocation kind"); // Allocate from the corresponding memory pool. void *Alloc = nullptr; - if (Error Err = MemoryPool->allocate(Size, &Alloc)) { - REPORT("%s\n", toString(std::move(Err)).data()); - return nullptr; - } + if (auto Err = MemoryPool->allocate(Size, &Alloc)) + return std::move(Err); if (Alloc) { // Get a list of agents that can access this memory pool. Inherently @@ -3879,10 +3872,8 @@ void *AMDGPUDeviceTy::allocate(size_t Size, void *, TargetAllocTy Kind) { }); // Enable all valid kernel agents to access the buffer. 
- if (auto Err = MemoryPool->enableAccess(Alloc, Size, Agents)) { - REPORT("%s\n", toString(std::move(Err)).data()); - return nullptr; - } + if (auto Err = MemoryPool->enableAccess(Alloc, Size, Agents)) + return std::move(Err); } return Alloc; diff --git a/offload/plugins-nextgen/common/include/MemoryManager.h b/offload/plugins-nextgen/common/include/MemoryManager.h index a4f6e628c403a..8f6c1adcdaa58 100644 --- a/offload/plugins-nextgen/common/include/MemoryManager.h +++ b/offload/plugins-nextgen/common/include/MemoryManager.h @@ -25,6 +25,10 @@ #include "Shared/Utils.h" #include "omptarget.h" +#include "llvm/Support/Error.h" + +namespace llvm { + /// Base class of per-device allocator. class DeviceAllocatorTy { public: @@ -32,11 +36,13 @@ class DeviceAllocatorTy { /// Allocate a memory of size \p Size . \p HstPtr is used to assist the /// allocation. - virtual void *allocate(size_t Size, void *HstPtr, - TargetAllocTy Kind = TARGET_ALLOC_DEFAULT) = 0; + virtual Expected + allocate(size_t Size, void *HstPtr, + TargetAllocTy Kind = TARGET_ALLOC_DEFAULT) = 0; /// Delete the pointer \p TgtPtr on the device - virtual int free(void *TgtPtr, TargetAllocTy Kind = TARGET_ALLOC_DEFAULT) = 0; + virtual Error free(void *TgtPtr, + TargetAllocTy Kind = TARGET_ALLOC_DEFAULT) = 0; }; /// Class of memory manager. The memory manager is per-device by using @@ -134,17 +140,17 @@ class MemoryManagerTy { size_t SizeThreshold = 1U << 13; /// Request memory from target device - void *allocateOnDevice(size_t Size, void *HstPtr) const { + Expected allocateOnDevice(size_t Size, void *HstPtr) const { return DeviceAllocator.allocate(Size, HstPtr, TARGET_ALLOC_DEVICE); } /// Deallocate data on device - int deleteOnDevice(void *Ptr) const { return DeviceAllocator.free(Ptr); } + Error deleteOnDevice(void *Ptr) const { return DeviceAllocator.free(Ptr); } /// This function is called when it tries to allocate memory on device but the /// device returns out of memory. 
It will first free all memory in the /// FreeList and try to allocate again. - void *freeAndAllocate(size_t Size, void *HstPtr) { + Expected freeAndAllocate(size_t Size, void *HstPtr) { std::vector RemoveList; // Deallocate all memory in FreeList @@ -154,7 +160,8 @@ class MemoryManagerTy { if (List.empty()) continue; for (const NodeTy &N : List) { - deleteOnDevice(N.Ptr); + if (auto Err = deleteOnDevice(N.Ptr)) + return Err; RemoveList.push_back(N.Ptr); } FreeLists[I].clear(); @@ -175,14 +182,22 @@ class MemoryManagerTy { /// allocate directly on the device. If a \p nullptr is returned, it might /// be because the device is OOM. In that case, it will free all unused /// memory and then try again. - void *allocateOrFreeAndAllocateOnDevice(size_t Size, void *HstPtr) { - void *TgtPtr = allocateOnDevice(Size, HstPtr); + Expected allocateOrFreeAndAllocateOnDevice(size_t Size, + void *HstPtr) { + auto TgtPtrOrErr = allocateOnDevice(Size, HstPtr); + if (!TgtPtrOrErr) + return TgtPtrOrErr.takeError(); + + void *TgtPtr = *TgtPtrOrErr; // We cannot get memory from the device. It might be due to OOM. Let's // free all memory in FreeLists and try again. if (TgtPtr == nullptr) { DP("Failed to get memory on device. 
Free all memory in FreeLists and " "try again.\n"); - TgtPtr = freeAndAllocate(Size, HstPtr); + TgtPtrOrErr = freeAndAllocate(Size, HstPtr); + if (!TgtPtrOrErr) + return TgtPtrOrErr.takeError(); + TgtPtr = *TgtPtrOrErr; } if (TgtPtr == nullptr) @@ -204,16 +219,17 @@ class MemoryManagerTy { /// Destructor ~MemoryManagerTy() { - for (auto Itr = PtrToNodeTable.begin(); Itr != PtrToNodeTable.end(); - ++Itr) { - assert(Itr->second.Ptr && "nullptr in map table"); - deleteOnDevice(Itr->second.Ptr); + for (auto &PtrToNode : PtrToNodeTable) { + assert(PtrToNode.second.Ptr && "nullptr in map table"); + if (auto Err = deleteOnDevice(PtrToNode.second.Ptr)) + REPORT("Failure to delete memory: %s\n", + toString(std::move(Err)).data()); } } /// Allocate memory of size \p Size from target device. \p HstPtr is used to /// assist the allocation. - void *allocate(size_t Size, void *HstPtr) { + Expected allocate(size_t Size, void *HstPtr) { // If the size is zero, we will not bother the target device. Just return // nullptr directly. if (Size == 0) @@ -228,11 +244,14 @@ class MemoryManagerTy { DP("%zu is greater than the threshold %zu. Allocate it directly from " "device\n", Size, SizeThreshold); - void *TgtPtr = allocateOrFreeAndAllocateOnDevice(Size, HstPtr); + auto TgtPtrOrErr = allocateOrFreeAndAllocateOnDevice(Size, HstPtr); + if (!TgtPtrOrErr) + return TgtPtrOrErr.takeError(); - DP("Got target pointer " DPxMOD ". Return directly.\n", DPxPTR(TgtPtr)); + DP("Got target pointer " DPxMOD ". Return directly.\n", + DPxPTR(*TgtPtrOrErr)); - return TgtPtr; + return *TgtPtrOrErr; } NodeTy *NodePtr = nullptr; @@ -260,8 +279,11 @@ class MemoryManagerTy { if (NodePtr == nullptr) { DP("Cannot find a node in the FreeLists. 
Allocate on device.\n"); // Allocate one on device - void *TgtPtr = allocateOrFreeAndAllocateOnDevice(Size, HstPtr); + auto TgtPtrOrErr = allocateOrFreeAndAllocateOnDevice(Size, HstPtr); + if (!TgtPtrOrErr) + return TgtPtrOrErr.takeError(); + void *TgtPtr = *TgtPtrOrErr; if (TgtPtr == nullptr) return nullptr; @@ -282,7 +304,7 @@ class MemoryManagerTy { } /// Deallocate memory pointed by \p TgtPtr - int free(void *TgtPtr) { + Error free(void *TgtPtr) { DP("MemoryManagerTy::free: target memory " DPxMOD ".\n", DPxPTR(TgtPtr)); NodeTy *P = nullptr; @@ -314,7 +336,7 @@ class MemoryManagerTy { FreeLists[B].insert(*P); } - return OFFLOAD_SUCCESS; + return Error::success(); } /// Get the size threshold from the environment variable @@ -344,4 +366,6 @@ class MemoryManagerTy { constexpr const size_t MemoryManagerTy::BucketSize[]; constexpr const int MemoryManagerTy::NumBuckets; +} // namespace llvm + #endif // LLVM_OPENMP_LIBOMPTARGET_PLUGINS_COMMON_MEMORYMANAGER_H diff --git a/offload/plugins-nextgen/common/src/PluginInterface.cpp b/offload/plugins-nextgen/common/src/PluginInterface.cpp index 7d05dd25dbf75..15b6b9866e5a2 100644 --- a/offload/plugins-nextgen/common/src/PluginInterface.cpp +++ b/offload/plugins-nextgen/common/src/PluginInterface.cpp @@ -73,11 +73,17 @@ struct RecordReplayTy { }; llvm::SmallVector GlobalEntries{}; - void *suggestAddress(uint64_t MaxMemoryAllocation) { + Expected suggestAddress(uint64_t MaxMemoryAllocation) { // Get a valid pointer address for this system - void *Addr = + auto AddrOrErr = Device->allocate(1024, /*HstPtr=*/nullptr, TARGET_ALLOC_DEFAULT); - Device->free(Addr); + if (!AddrOrErr) + return AddrOrErr.takeError(); + + void *Addr = *AddrOrErr; + if (auto Err = Device->free(Addr)) + return std::move(Err); + // Align Address to MaxMemoryAllocation Addr = (void *)utils::alignPtr((Addr), MaxMemoryAllocation); return Addr; @@ -86,8 +92,12 @@ struct RecordReplayTy { Error preAllocateVAMemory(uint64_t MaxMemoryAllocation, void *VAddr) { 
size_t ASize = MaxMemoryAllocation; - if (!VAddr && isRecording()) - VAddr = suggestAddress(MaxMemoryAllocation); + if (!VAddr && isRecording()) { + auto VAddrOrErr = suggestAddress(MaxMemoryAllocation); + if (!VAddrOrErr) + return VAddrOrErr.takeError(); + VAddr = *VAddrOrErr; + } DP("Request %ld bytes allocated at %p\n", MaxMemoryAllocation, VAddr); @@ -117,8 +127,11 @@ struct RecordReplayTy { constexpr size_t STEP = 1024 * 1024 * 1024ULL; MemoryStart = nullptr; for (TotalSize = MAX_MEMORY_ALLOCATION; TotalSize > 0; TotalSize -= STEP) { - MemoryStart = + auto MemoryStartOrErr = Device->allocate(TotalSize, /*HstPtr=*/nullptr, TARGET_ALLOC_DEFAULT); + if (!MemoryStartOrErr) + return MemoryStartOrErr.takeError(); + MemoryStart = *MemoryStartOrErr; if (MemoryStart) break; } @@ -352,13 +365,15 @@ struct RecordReplayTy { return Plugin::success(); } - void deinit() { + Error deinit() { if (UsedVAMap) { if (auto Err = Device->memoryVAUnMap(MemoryStart, TotalSize)) - report_fatal_error("Error on releasing virtual memory space"); + return Err; } else { - Device->free(MemoryStart); + if (auto Err = Device->free(MemoryStart)) + return Err; } + return Plugin::success(); } }; } // namespace llvm::omp::target::plugin @@ -838,7 +853,8 @@ Error GenericDeviceTy::deinit(GenericPluginTy &Plugin) { RecordReplayTy &RecordReplay = Plugin.getRecordReplay(); if (RecordReplay.isRecordingOrReplaying()) - RecordReplay.deinit(); + if (auto Err = RecordReplay.deinit()) + return Err; if (RPCServer) if (auto Err = RPCServer->deinitDevice(*this)) @@ -1297,7 +1313,10 @@ Expected GenericDeviceTy::dataAlloc(int64_t Size, void *HostPtr, case TARGET_ALLOC_DEFAULT: case TARGET_ALLOC_DEVICE: if (MemoryManager) { - Alloc = MemoryManager->allocate(Size, HostPtr); + auto AllocOrErr = MemoryManager->allocate(Size, HostPtr); + if (!AllocOrErr) + return AllocOrErr.takeError(); + Alloc = *AllocOrErr; if (!Alloc) return Plugin::error(ErrorCode::OUT_OF_RESOURCES, "failed to allocate from memory manager"); @@ 
-1305,12 +1324,16 @@ Expected GenericDeviceTy::dataAlloc(int64_t Size, void *HostPtr, } [[fallthrough]]; case TARGET_ALLOC_HOST: - case TARGET_ALLOC_SHARED: - Alloc = allocate(Size, HostPtr, Kind); + case TARGET_ALLOC_SHARED: { + auto AllocOrErr = allocate(Size, HostPtr, Kind); + if (!AllocOrErr) + return AllocOrErr.takeError(); + Alloc = *AllocOrErr; if (!Alloc) return Plugin::error(ErrorCode::OUT_OF_RESOURCES, "failed to allocate from device allocator"); } + } // Report error if the memory manager or the device allocator did not return // any memory buffer. @@ -1382,28 +1405,19 @@ Error GenericDeviceTy::dataDelete(void *TgtPtr, TargetAllocTy Kind) { #undef DEALLOCATION_ERROR } - int Res; switch (Kind) { case TARGET_ALLOC_DEFAULT: case TARGET_ALLOC_DEVICE: if (MemoryManager) { - Res = MemoryManager->free(TgtPtr); - if (Res) - return Plugin::error( - ErrorCode::OUT_OF_RESOURCES, - "failure to deallocate device pointer %p via memory manager", - TgtPtr); + if (auto Err = MemoryManager->free(TgtPtr)) + return Err; break; } [[fallthrough]]; case TARGET_ALLOC_HOST: case TARGET_ALLOC_SHARED: - Res = free(TgtPtr, Kind); - if (Res) - return Plugin::error( - ErrorCode::UNKNOWN, - "failure to deallocate device pointer %p via device deallocator", - TgtPtr); + if (auto Err = free(TgtPtr, Kind)) + return Err; } // Unregister deallocated pinned memory buffer if the type is host memory. 
diff --git a/offload/plugins-nextgen/common/src/RPC.cpp b/offload/plugins-nextgen/common/src/RPC.cpp index 17d69b49b3b7e..e19f2ef94de6e 100644 --- a/offload/plugins-nextgen/common/src/RPC.cpp +++ b/offload/plugins-nextgen/common/src/RPC.cpp @@ -28,15 +28,22 @@ rpc::Status handleOffloadOpcodes(plugin::GenericDeviceTy &Device, switch (Port.get_opcode()) { case LIBC_MALLOC: { Port.recv_and_send([&](rpc::Buffer *Buffer, uint32_t) { - Buffer->data[0] = reinterpret_cast( - Device.allocate(Buffer->data[0], nullptr, TARGET_ALLOC_DEVICE)); + auto PtrOrErr = + Device.allocate(Buffer->data[0], nullptr, TARGET_ALLOC_DEVICE); + void *Ptr = nullptr; + if (!PtrOrErr) + llvm::consumeError(PtrOrErr.takeError()); + else + Ptr = *PtrOrErr; + Buffer->data[0] = reinterpret_cast(Ptr); }); break; } case LIBC_FREE: { Port.recv([&](rpc::Buffer *Buffer, uint32_t) { - Device.free(reinterpret_cast(Buffer->data[0]), - TARGET_ALLOC_DEVICE); + if (auto Err = Device.free(reinterpret_cast(Buffer->data[0]), + TARGET_ALLOC_DEVICE)) + llvm::consumeError(std::move(Err)); }); break; } @@ -171,9 +178,13 @@ Error RPCServerTy::initDevice(plugin::GenericDeviceTy &Device, plugin::DeviceImageTy &Image) { uint64_t NumPorts = std::min(Device.requestedRPCPortCount(), rpc::MAX_PORT_COUNT); - void *RPCBuffer = Device.allocate( + auto RPCBufferOrErr = Device.allocate( rpc::Server::allocation_size(Device.getWarpSize(), NumPorts), nullptr, TARGET_ALLOC_HOST); + if (!RPCBufferOrErr) + return RPCBufferOrErr.takeError(); + + void *RPCBuffer = *RPCBufferOrErr; if (!RPCBuffer) return plugin::Plugin::error( error::ErrorCode::UNKNOWN, @@ -198,7 +209,8 @@ Error RPCServerTy::initDevice(plugin::GenericDeviceTy &Device, Error RPCServerTy::deinitDevice(plugin::GenericDeviceTy &Device) { std::lock_guard Lock(BufferMutex); - Device.free(Buffers[Device.getDeviceId()], TARGET_ALLOC_HOST); + if (auto Err = Device.free(Buffers[Device.getDeviceId()], TARGET_ALLOC_HOST)) + return Err; Buffers[Device.getDeviceId()] = nullptr; 
Devices[Device.getDeviceId()] = nullptr; return Error::success(); diff --git a/offload/plugins-nextgen/cuda/src/rtl.cpp b/offload/plugins-nextgen/cuda/src/rtl.cpp index b2f840113cff3..b30c651223cad 100644 --- a/offload/plugins-nextgen/cuda/src/rtl.cpp +++ b/offload/plugins-nextgen/cuda/src/rtl.cpp @@ -561,14 +561,12 @@ struct CUDADeviceTy : public GenericDeviceTy { } /// Allocate memory on the device or related to the device. - void *allocate(size_t Size, void *, TargetAllocTy Kind) override { + Expected allocate(size_t Size, void *, TargetAllocTy Kind) override { if (Size == 0) return nullptr; - if (auto Err = setContext()) { - REPORT("Failure to alloc memory: %s\n", toString(std::move(Err)).data()); - return nullptr; - } + if (auto Err = setContext()) + return std::move(Err); void *MemAlloc = nullptr; CUdeviceptr DevicePtr; @@ -589,23 +587,18 @@ struct CUDADeviceTy : public GenericDeviceTy { break; } - if (auto Err = - Plugin::check(Res, "error in cuMemAlloc[Host|Managed]: %s")) { - REPORT("Failure to alloc memory: %s\n", toString(std::move(Err)).data()); - return nullptr; - } + if (auto Err = Plugin::check(Res, "error in cuMemAlloc[Host|Managed]: %s")) + return std::move(Err); return MemAlloc; } /// Deallocate memory on the device or related to the device. 
- int free(void *TgtPtr, TargetAllocTy Kind) override { + Error free(void *TgtPtr, TargetAllocTy Kind) override { if (TgtPtr == nullptr) - return OFFLOAD_SUCCESS; + return Plugin::success(); - if (auto Err = setContext()) { - REPORT("Failure to free memory: %s\n", toString(std::move(Err)).data()); - return OFFLOAD_FAIL; - } + if (auto Err = setContext()) + return Err; CUresult Res; switch (Kind) { @@ -619,11 +612,7 @@ struct CUDADeviceTy : public GenericDeviceTy { break; } - if (auto Err = Plugin::check(Res, "error in cuMemFree[Host]: %s")) { - REPORT("Failure to free memory: %s\n", toString(std::move(Err)).data()); - return OFFLOAD_FAIL; - } - return OFFLOAD_SUCCESS; + return Plugin::check(Res, "error in cuMemFree[Host]: %s"); } /// Synchronize current thread with the pending operations on the async info. @@ -1310,8 +1299,12 @@ struct CUDADeviceTy : public GenericDeviceTy { // Allocate a buffer to store all of the known constructor / destructor // functions in so we can iterate them on the device. - void *Buffer = + auto BufferOrErr = allocate(Funcs.size() * sizeof(void *), nullptr, TARGET_ALLOC_DEVICE); + if (!BufferOrErr) + return BufferOrErr.takeError(); + + void *Buffer = *BufferOrErr; if (!Buffer) return Plugin::error(ErrorCode::OUT_OF_RESOURCES, "failed to allocate memory for global buffer"); @@ -1360,12 +1353,10 @@ struct CUDADeviceTy : public GenericDeviceTy { Error Err = Plugin::success(); AsyncInfoWrapper.finalize(Err); + if (Err) + return Err; - if (free(Buffer, TARGET_ALLOC_DEVICE) != OFFLOAD_SUCCESS) - return Plugin::error(ErrorCode::UNKNOWN, - "failed to free memory for global buffer"); - - return Err; + return free(Buffer, TARGET_ALLOC_DEVICE); } /// Stream manager for CUDA streams. 
diff --git a/offload/plugins-nextgen/host/src/rtl.cpp b/offload/plugins-nextgen/host/src/rtl.cpp index 44e2584fe53cc..0845032d0aae2 100644 --- a/offload/plugins-nextgen/host/src/rtl.cpp +++ b/offload/plugins-nextgen/host/src/rtl.cpp @@ -240,7 +240,7 @@ struct GenELF64DeviceTy : public GenericDeviceTy { } /// Allocate memory. Use std::malloc in all cases. - void *allocate(size_t Size, void *, TargetAllocTy Kind) override { + Expected allocate(size_t Size, void *, TargetAllocTy Kind) override { if (Size == 0) return nullptr; @@ -257,9 +257,9 @@ struct GenELF64DeviceTy : public GenericDeviceTy { } /// Free the memory. Use std::free in all cases. - int free(void *TgtPtr, TargetAllocTy Kind) override { + Error free(void *TgtPtr, TargetAllocTy Kind) override { std::free(TgtPtr); - return OFFLOAD_SUCCESS; + return Plugin::success(); } /// This plugin does nothing to lock buffers. Do not return an error, just diff --git a/offload/test/offloading/fortran/target-no-loop.f90 b/offload/test/offloading/fortran/target-no-loop.f90 new file mode 100644 index 0000000000000..8e40e20e73e70 --- /dev/null +++ b/offload/test/offloading/fortran/target-no-loop.f90 @@ -0,0 +1,96 @@ +! REQUIRES: flang + +! RUN: %libomptarget-compile-fortran-generic -O3 -fopenmp-assume-threads-oversubscription -fopenmp-assume-teams-oversubscription +! RUN: env LIBOMPTARGET_INFO=16 OMP_NUM_TEAMS=16 OMP_TEAMS_THREAD_LIMIT=16 %libomptarget-run-generic 2>&1 | %fcheck-generic +function check_errors(array) result (errors) + integer, intent(in) :: array(1024) + integer :: errors + integer :: i + errors = 0 + do i = 1, 1024 + if ( array( i) .ne. (i) ) then + errors = errors + 1 + end if + end do +end function + +program main + use omp_lib + implicit none + integer :: i,j,red + integer :: array(1024), errors = 0 + array = 1 + + ! No-loop kernel + !$omp target teams distribute parallel do + do i = 1, 1024 + array(i) = i + end do + errors = errors + check_errors(array) + + ! 
SPMD kernel (num_teams clause blocks promotion to no-loop) + array = 1 + !$omp target teams distribute parallel do num_teams(3) + do i = 1, 1024 + array(i) = i + end do + + errors = errors + check_errors(array) + + ! No-loop kernel + array = 1 + !$omp target teams distribute parallel do num_threads(64) + do i = 1, 1024 + array(i) = i + end do + + errors = errors + check_errors(array) + + ! SPMD kernel + array = 1 + !$omp target parallel do + do i = 1, 1024 + array(i) = i + end do + + errors = errors + check_errors(array) + + ! Generic kernel + array = 1 + !$omp target teams distribute + do i = 1, 1024 + array(i) = i + end do + + errors = errors + check_errors(array) + + ! SPMD kernel (reduction clause blocks promotion to no-loop) + array = 1 + red =0 + !$omp target teams distribute parallel do reduction(+:red) + do i = 1, 1024 + red = red + array(i) + end do + + if (red .ne. 1024) then + errors = errors + 1 + end if + + print *,"number of errors: ", errors + +end program main + +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD-No-Loop mode +! CHECK: info: #Args: 3 Teams x Thrds: 64x 16 +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD mode +! CHECK: info: #Args: 3 Teams x Thrds: 3x 16 {{.*}} +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD-No-Loop mode +! CHECK: info: #Args: 3 Teams x Thrds: 64x 16 {{.*}} +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD mode +! CHECK: info: #Args: 3 Teams x Thrds: 1x 16 +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} Generic mode +! CHECK: info: #Args: 3 Teams x Thrds: 16x 16 {{.*}} +! CHECK: "PluginInterface" device {{[0-9]+}} info: Launching kernel {{.*}} SPMD mode +! CHECK: info: #Args: 4 Teams x Thrds: 16x 16 {{.*}} +! 
CHECK: number of errors: 0 + diff --git a/openmp/device/src/Workshare.cpp b/openmp/device/src/Workshare.cpp index 59a2cc3f27aca..653104ce883d1 100644 --- a/openmp/device/src/Workshare.cpp +++ b/openmp/device/src/Workshare.cpp @@ -800,10 +800,6 @@ template class StaticLoopChunker { // If we know we have more threads than iterations we can indicate that to // avoid an outer loop. - if (config::getAssumeThreadsOversubscription()) { - OneIterationPerThread = true; - } - if (OneIterationPerThread) ASSERT(NumThreads >= NumIters, "Broken assumption"); @@ -851,10 +847,6 @@ template class StaticLoopChunker { // If we know we have more blocks than iterations we can indicate that to // avoid an outer loop. - if (config::getAssumeTeamsOversubscription()) { - OneIterationPerThread = true; - } - if (OneIterationPerThread) ASSERT(NumBlocks >= NumIters, "Broken assumption"); @@ -914,11 +906,6 @@ template class StaticLoopChunker { // If we know we have more threads (across all blocks) than iterations we // can indicate that to avoid an outer loop. 
- if (config::getAssumeTeamsOversubscription() & - config::getAssumeThreadsOversubscription()) { - OneIterationPerThread = true; - } - if (OneIterationPerThread) ASSERT(NumBlocks * NumThreads >= NumIters, "Broken assumption"); diff --git a/openmp/runtime/test/transform/fuse/foreach.cpp b/openmp/runtime/test/transform/fuse/foreach.cpp new file mode 100644 index 0000000000000..176465b201faa --- /dev/null +++ b/openmp/runtime/test/transform/fuse/foreach.cpp @@ -0,0 +1,191 @@ +// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines + +#ifndef HEADER +#define HEADER + +#include +#include +#include +#include + +struct Reporter { + const char *name; + + Reporter(const char *name) : name(name) { print("ctor"); } + + Reporter() : name("") { print("ctor"); } + + Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); } + + Reporter(Reporter &&that) : name(that.name) { print("move ctor"); } + + ~Reporter() { print("dtor"); } + + const Reporter &operator=(const Reporter &that) { + print("copy assign"); + this->name = that.name; + return *this; + } + + const Reporter &operator=(Reporter &&that) { + print("move assign"); + this->name = that.name; + return *this; + } + + struct Iterator { + const Reporter *owner; + int pos; + + Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {} + + Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) { + owner->print("iterator copy ctor"); + } + + Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) { + owner->print("iterator move ctor"); + } + + ~Iterator() { owner->print("iterator dtor"); } + + const Iterator &operator=(const Iterator &that) { + owner->print("iterator copy assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + const Iterator &operator=(Iterator &&that) { + owner->print("iterator move assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + bool operator==(const Iterator &that) const { + 
owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos); + return this->pos == that.pos; + } + + Iterator &operator++() { + owner->print("iterator prefix ++"); + pos -= 1; + return *this; + } + + Iterator operator++(int) { + owner->print("iterator postfix ++"); + auto result = *this; + pos -= 1; + return result; + } + + int operator*() const { + int result = 2 - pos; + owner->print("iterator deref: %i", result); + return result; + } + + size_t operator-(const Iterator &that) const { + int result = (2 - this->pos) - (2 - that.pos); + owner->print("iterator distance: %d", result); + return result; + } + + Iterator operator+(int steps) const { + owner->print("iterator advance: %i += %i", 2 - this->pos, steps); + return Iterator(owner, pos - steps); + } + + void print(const char *msg) const { owner->print(msg); } + }; + + Iterator begin() const { + print("begin()"); + return Iterator(this, 2); + } + + Iterator end() const { + print("end()"); + return Iterator(this, -1); + } + + void print(const char *msg, ...) 
const { + va_list args; + va_start(args, msg); + printf("[%s] ", name); + vprintf(msg, args); + printf("\n"); + va_end(args); + } +}; + +int main() { + printf("do\n"); +#pragma omp fuse + { + for (Reporter a{"C"}; auto &&v : Reporter("A")) + printf("v=%d\n", v); + for (Reporter aa{"D"}; auto &&vv : Reporter("B")) + printf("vv=%d\n", vv); + } + printf("done\n"); + return EXIT_SUCCESS; +} + +// CHECK: [C] ctor +// CHECK-NEXT: [A] ctor +// CHECK-NEXT: [A] end() +// CHECK-NEXT: [A] begin() +// CHECK-NEXT: [A] begin() +// CHECK-NEXT: [A] iterator distance: 3 +// CHECK-NEXT: [D] ctor +// CHECK-NEXT: [B] ctor +// CHECK-NEXT: [B] end() +// CHECK-NEXT: [B] begin() +// CHECK-NEXT: [B] begin() +// CHECK-NEXT: [B] iterator distance: 3 +// CHECK-NEXT: [A] iterator advance: 0 += 0 +// CHECK-NEXT: [A] iterator move assign +// CHECK-NEXT: [A] iterator deref: 0 +// CHECK-NEXT: v=0 +// CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [B] iterator advance: 0 += 0 +// CHECK-NEXT: [B] iterator move assign +// CHECK-NEXT: [B] iterator deref: 0 +// CHECK-NEXT: vv=0 +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [A] iterator advance: 0 += 1 +// CHECK-NEXT: [A] iterator move assign +// CHECK-NEXT: [A] iterator deref: 1 +// CHECK-NEXT: v=1 +// CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [B] iterator advance: 0 += 1 +// CHECK-NEXT: [B] iterator move assign +// CHECK-NEXT: [B] iterator deref: 1 +// CHECK-NEXT: vv=1 +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [A] iterator advance: 0 += 2 +// CHECK-NEXT: [A] iterator move assign +// CHECK-NEXT: [A] iterator deref: 2 +// CHECK-NEXT: v=2 +// CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [B] iterator advance: 0 += 2 +// CHECK-NEXT: [B] iterator move assign +// CHECK-NEXT: [B] iterator deref: 2 +// CHECK-NEXT: vv=2 +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [B] iterator dtor +// CHECK-NEXT: [B] dtor +// CHECK-NEXT: [D] dtor +// CHECK-NEXT: [A] iterator dtor +// 
CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [A] iterator dtor +// CHECK-NEXT: [A] dtor +// CHECK-NEXT: [C] dtor +// CHECK-NEXT: done + +#endif diff --git a/openmp/runtime/test/transform/fuse/intfor.c b/openmp/runtime/test/transform/fuse/intfor.c new file mode 100644 index 0000000000000..b8171b4df7042 --- /dev/null +++ b/openmp/runtime/test/transform/fuse/intfor.c @@ -0,0 +1,50 @@ +// RUN: %libomp-compile-and-run | FileCheck %s --match-full-lines + +#ifndef HEADER +#define HEADER + +#include +#include + +int main() { + printf("do\n"); +#pragma omp fuse + { + for (int i = 5; i <= 25; i += 5) + printf("i=%d\n", i); + for (int j = 10; j < 100; j += 10) + printf("j=%d\n", j); + for (int k = 10; k > 0; --k) + printf("k=%d\n", k); + } + printf("done\n"); + return EXIT_SUCCESS; +} +#endif /* HEADER */ + +// CHECK: do +// CHECK-NEXT: i=5 +// CHECK-NEXT: j=10 +// CHECK-NEXT: k=10 +// CHECK-NEXT: i=10 +// CHECK-NEXT: j=20 +// CHECK-NEXT: k=9 +// CHECK-NEXT: i=15 +// CHECK-NEXT: j=30 +// CHECK-NEXT: k=8 +// CHECK-NEXT: i=20 +// CHECK-NEXT: j=40 +// CHECK-NEXT: k=7 +// CHECK-NEXT: i=25 +// CHECK-NEXT: j=50 +// CHECK-NEXT: k=6 +// CHECK-NEXT: j=60 +// CHECK-NEXT: k=5 +// CHECK-NEXT: j=70 +// CHECK-NEXT: k=4 +// CHECK-NEXT: j=80 +// CHECK-NEXT: k=3 +// CHECK-NEXT: j=90 +// CHECK-NEXT: k=2 +// CHECK-NEXT: k=1 +// CHECK-NEXT: done diff --git a/openmp/runtime/test/transform/fuse/iterfor.cpp b/openmp/runtime/test/transform/fuse/iterfor.cpp new file mode 100644 index 0000000000000..552484b2981c4 --- /dev/null +++ b/openmp/runtime/test/transform/fuse/iterfor.cpp @@ -0,0 +1,194 @@ +// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines + +#ifndef HEADER +#define HEADER + +#include +#include +#include +#include + +struct Reporter { + const char *name; + + Reporter(const char *name) : name(name) { print("ctor"); } + + Reporter() : name("") { print("ctor"); } + + Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); } + + Reporter(Reporter &&that) : 
name(that.name) { print("move ctor"); } + + ~Reporter() { print("dtor"); } + + const Reporter &operator=(const Reporter &that) { + print("copy assign"); + this->name = that.name; + return *this; + } + + const Reporter &operator=(Reporter &&that) { + print("move assign"); + this->name = that.name; + return *this; + } + + struct Iterator { + const Reporter *owner; + int pos; + + Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {} + + Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) { + owner->print("iterator copy ctor"); + } + + Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) { + owner->print("iterator move ctor"); + } + + ~Iterator() { owner->print("iterator dtor"); } + + const Iterator &operator=(const Iterator &that) { + owner->print("iterator copy assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + const Iterator &operator=(Iterator &&that) { + owner->print("iterator move assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + bool operator==(const Iterator &that) const { + owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos); + return this->pos == that.pos; + } + + bool operator!=(const Iterator &that) const { + owner->print("iterator %d != %d", 2 - this->pos, 2 - that.pos); + return this->pos != that.pos; + } + + Iterator &operator++() { + owner->print("iterator prefix ++"); + pos -= 1; + return *this; + } + + Iterator operator++(int) { + owner->print("iterator postfix ++"); + auto result = *this; + pos -= 1; + return result; + } + + int operator*() const { + int result = 2 - pos; + owner->print("iterator deref: %i", result); + return result; + } + + size_t operator-(const Iterator &that) const { + int result = (2 - this->pos) - (2 - that.pos); + owner->print("iterator distance: %d", result); + return result; + } + + Iterator operator+(int steps) const { + owner->print("iterator advance: %i += %i", 2 - this->pos, steps); + return 
Iterator(owner, pos - steps); + } + }; + + Iterator begin() const { + print("begin()"); + return Iterator(this, 2); + } + + Iterator end() const { + print("end()"); + return Iterator(this, -1); + } + + void print(const char *msg, ...) const { + va_list args; + va_start(args, msg); + printf("[%s] ", name); + vprintf(msg, args); + printf("\n"); + va_end(args); + } +}; + +int main() { + printf("do\n"); + Reporter C("C"); + Reporter D("D"); +#pragma omp fuse + { + for (auto it = C.begin(); it != C.end(); ++it) + printf("v=%d\n", *it); + + for (auto it = D.begin(); it != D.end(); ++it) + printf("vv=%d\n", *it); + } + printf("done\n"); + return EXIT_SUCCESS; +} + +#endif /* HEADER */ + +// CHECK: do +// CHECK: [C] ctor +// CHECK-NEXT: [D] ctor +// CHECK-NEXT: [C] begin() +// CHECK-NEXT: [C] begin() +// CHECK-NEXT: [C] end() +// CHECK-NEXT: [C] iterator distance: 3 +// CHECK-NEXT: [D] begin() +// CHECK-NEXT: [D] begin() +// CHECK-NEXT: [D] end() +// CHECK-NEXT: [D] iterator distance: 3 +// CHECK-NEXT: [C] iterator advance: 0 += 0 +// CHECK-NEXT: [C] iterator move assign +// CHECK-NEXT: [C] iterator deref: 0 +// CHECK-NEXT: v=0 +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: [D] iterator advance: 0 += 0 +// CHECK-NEXT: [D] iterator move assign +// CHECK-NEXT: [D] iterator deref: 0 +// CHECK-NEXT: vv=0 +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [C] iterator advance: 0 += 1 +// CHECK-NEXT: [C] iterator move assign +// CHECK-NEXT: [C] iterator deref: 1 +// CHECK-NEXT: v=1 +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: [D] iterator advance: 0 += 1 +// CHECK-NEXT: [D] iterator move assign +// CHECK-NEXT: [D] iterator deref: 1 +// CHECK-NEXT: vv=1 +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [C] iterator advance: 0 += 2 +// CHECK-NEXT: [C] iterator move assign +// CHECK-NEXT: [C] iterator deref: 2 +// CHECK-NEXT: v=2 +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: [D] iterator advance: 0 += 2 +// CHECK-NEXT: [D] iterator move assign +// CHECK-NEXT: [D] 
iterator deref: 2 +// CHECK-NEXT: vv=2 +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: done +// CHECK-NEXT: [D] iterator dtor +// CHECK-NEXT: [C] iterator dtor +// CHECK-NEXT: [D] dtor +// CHECK-NEXT: [C] dtor diff --git a/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-foreach.cpp b/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-foreach.cpp new file mode 100644 index 0000000000000..dcbbdf1b6734e --- /dev/null +++ b/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-foreach.cpp @@ -0,0 +1,207 @@ +// RUN: %libomp-cxx20-compile-and-run | FileCheck %s --match-full-lines + +#ifndef HEADER +#define HEADER + +#include +#include +#include +#include + +struct Reporter { + const char *name; + + Reporter(const char *name) : name(name) { print("ctor"); } + + Reporter() : name("") { print("ctor"); } + + Reporter(const Reporter &that) : name(that.name) { print("copy ctor"); } + + Reporter(Reporter &&that) : name(that.name) { print("move ctor"); } + + ~Reporter() { print("dtor"); } + + const Reporter &operator=(const Reporter &that) { + print("copy assign"); + this->name = that.name; + return *this; + } + + const Reporter &operator=(Reporter &&that) { + print("move assign"); + this->name = that.name; + return *this; + } + + struct Iterator { + const Reporter *owner; + int pos; + + Iterator(const Reporter *owner, int pos) : owner(owner), pos(pos) {} + + Iterator(const Iterator &that) : owner(that.owner), pos(that.pos) { + owner->print("iterator copy ctor"); + } + + Iterator(Iterator &&that) : owner(that.owner), pos(that.pos) { + owner->print("iterator move ctor"); + } + + ~Iterator() { owner->print("iterator dtor"); } + + const Iterator &operator=(const Iterator &that) { + owner->print("iterator copy assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + const Iterator 
&operator=(Iterator &&that) { + owner->print("iterator move assign"); + this->owner = that.owner; + this->pos = that.pos; + return *this; + } + + bool operator==(const Iterator &that) const { + owner->print("iterator %d == %d", 2 - this->pos, 2 - that.pos); + return this->pos == that.pos; + } + + Iterator &operator++() { + owner->print("iterator prefix ++"); + pos -= 1; + return *this; + } + + Iterator operator++(int) { + owner->print("iterator postfix ++"); + auto result = *this; + pos -= 1; + return result; + } + + int operator*() const { + int result = 2 - pos; + owner->print("iterator deref: %i", result); + return result; + } + + size_t operator-(const Iterator &that) const { + int result = (2 - this->pos) - (2 - that.pos); + owner->print("iterator distance: %d", result); + return result; + } + + Iterator operator+(int steps) const { + owner->print("iterator advance: %i += %i", 2 - this->pos, steps); + return Iterator(owner, pos - steps); + } + + void print(const char *msg) const { owner->print(msg); } + }; + + Iterator begin() const { + print("begin()"); + return Iterator(this, 2); + } + + Iterator end() const { + print("end()"); + return Iterator(this, -1); + } + + void print(const char *msg, ...) 
const { + va_list args; + va_start(args, msg); + printf("[%s] ", name); + vprintf(msg, args); + printf("\n"); + va_end(args); + } +}; + +int main() { + printf("do\n"); +#pragma omp parallel for collapse(2) num_threads(1) + for (int i = 0; i < 3; ++i) +#pragma omp fuse + { + for (Reporter c{"init-stmt"}; auto &&v : Reporter("range")) + printf("i=%d v=%d\n", i, v); + for (int vv = 0; vv < 3; ++vv) + printf("i=%d vv=%d\n", i, vv); + } + printf("done\n"); + return EXIT_SUCCESS; +} + +#endif /* HEADER */ + +// CHECK: do +// CHECK-NEXT: [init-stmt] ctor +// CHECK-NEXT: [range] ctor +// CHECK-NEXT: [range] end() +// CHECK-NEXT: [range] begin() +// CHECK-NEXT: [range] begin() +// CHECK-NEXT: [range] iterator distance: 3 +// CHECK-NEXT: [range] iterator advance: 0 += 0 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 0 +// CHECK-NEXT: i=0 v=0 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=0 vv=0 +// CHECK-NEXT: [range] iterator advance: 0 += 1 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 1 +// CHECK-NEXT: i=0 v=1 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=0 vv=1 +// CHECK-NEXT: [range] iterator advance: 0 += 2 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 2 +// CHECK-NEXT: i=0 v=2 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=0 vv=2 +// CHECK-NEXT: [range] iterator advance: 0 += 0 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 0 +// CHECK-NEXT: i=1 v=0 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=1 vv=0 +// CHECK-NEXT: [range] iterator advance: 0 += 1 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 1 +// CHECK-NEXT: i=1 v=1 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=1 vv=1 +// CHECK-NEXT: [range] iterator advance: 0 += 2 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 2 +// CHECK-NEXT: i=1 v=2 
+// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=1 vv=2 +// CHECK-NEXT: [range] iterator advance: 0 += 0 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 0 +// CHECK-NEXT: i=2 v=0 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=2 vv=0 +// CHECK-NEXT: [range] iterator advance: 0 += 1 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 1 +// CHECK-NEXT: i=2 v=1 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=2 vv=1 +// CHECK-NEXT: [range] iterator advance: 0 += 2 +// CHECK-NEXT: [range] iterator move assign +// CHECK-NEXT: [range] iterator deref: 2 +// CHECK-NEXT: i=2 v=2 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: i=2 vv=2 +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: [range] iterator dtor +// CHECK-NEXT: [range] dtor +// CHECK-NEXT: [init-stmt] dtor +// CHECK-NEXT: done diff --git a/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-intfor.c b/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-intfor.c new file mode 100644 index 0000000000000..9630fec50bc20 --- /dev/null +++ b/openmp/runtime/test/transform/fuse/parallel-wsloop-collapse-intfor.c @@ -0,0 +1,45 @@ +// RUN: %libomp-cxx-compile-and-run | FileCheck %s --match-full-lines + +#ifndef HEADER +#define HEADER + +#include +#include + +int main() { + printf("do\n"); +#pragma omp parallel for collapse(2) num_threads(1) + for (int i = 0; i < 3; ++i) +#pragma omp fuse + { + for (int j = 0; j < 3; ++j) + printf("i=%d j=%d\n", i, j); + for (int k = 0; k < 3; ++k) + printf("i=%d k=%d\n", i, k); + } + printf("done\n"); + return EXIT_SUCCESS; +} + +#endif /* HEADER */ + +// CHECK: do +// CHECK-NEXT: i=0 j=0 +// CHECK-NEXT: i=0 k=0 +// CHECK-NEXT: i=0 j=1 +// CHECK-NEXT: i=0 k=1 +// CHECK-NEXT: i=0 j=2 +// CHECK-NEXT: i=0 k=2 +// CHECK-NEXT: i=1 j=0 +// CHECK-NEXT: i=1 k=0 +// CHECK-NEXT: i=1 j=1 +// CHECK-NEXT: i=1 k=1 +// CHECK-NEXT: i=1 j=2 +// 
CHECK-NEXT: i=1 k=2 +// CHECK-NEXT: i=2 j=0 +// CHECK-NEXT: i=2 k=0 +// CHECK-NEXT: i=2 j=1 +// CHECK-NEXT: i=2 k=1 +// CHECK-NEXT: i=2 j=2 +// CHECK-NEXT: i=2 k=2 +// CHECK-NEXT: done diff --git a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel index 13fca5f592c7a..5af035d840946 100644 --- a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel @@ -1313,7 +1313,10 @@ cc_library( cc_library( name = "tooling_dependency_scanning", - srcs = glob(["lib/Tooling/DependencyScanning/**/*.cpp"]), + srcs = glob([ + "lib/Tooling/DependencyScanning/**/*.h", + "lib/Tooling/DependencyScanning/**/*.cpp", + ]), hdrs = glob(["include/clang/Tooling/DependencyScanning/**/*.h"]), deps = [ ":basic", diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel index 9d02ff9f459ae..8d9e80393bf20 100644 --- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel @@ -2657,6 +2657,22 @@ libc_support_library( ], ) +libc_support_library( + name = "__support_math_exp10m1f", + hdrs = ["src/__support/math/exp10m1f.h"], + deps = [ + ":__support_fputil_except_value_utils", + ":__support_fputil_fenv_impl", + ":__support_fputil_fp_bits", + ":__support_fputil_multiply_add", + ":__support_fputil_polyeval", + ":__support_fputil_rounding_mode", + ":__support_macros_optimization", + ":__support_math_exp10f_utils", + ":errno", + ], +) + libc_support_library( name = "__support_math_erff", hdrs = ["src/__support/math/erff.h"], @@ -3613,7 +3629,7 @@ libc_math_function( libc_math_function( name = "exp10m1f", additional_deps = [ - ":__support_math_exp10f_utils", + ":__support_math_exp10m1f", ], ) diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel index 2a0cc30b2267c..422c29fc9c4d5 100644 --- 
a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel @@ -6091,6 +6091,7 @@ cc_library( ":DialectUtils", ":GPUDialect", ":IR", + ":InferIntRangeInterface", ":LLVMDialect", ":NVVMOpsIncGen", ":NVVMRequiresSMTraitsIncGen", @@ -6295,6 +6296,7 @@ cc_library( ":BytecodeOpInterface", ":GPUDialect", ":IR", + ":InferIntRangeInterface", ":LLVMDialect", ":ROCDLOpsIncGen", ":SideEffectInterfaces", @@ -6685,6 +6687,7 @@ cc_library( ":IR", ":InferTypeOpInterface", ":SMTIncGen", + ":SideEffectInterfaces", ":Support", "//llvm:Support", ], @@ -11831,6 +11834,7 @@ cc_library( srcs = glob(["lib/Dialect/Transform/PDLExtension/*.cpp"]), hdrs = glob(["include/mlir/Dialect/Transform/PDLExtension/*.h"]), deps = [ + ":BytecodeOpInterface", ":IR", ":PDLDialect", ":PDLInterpDialect", @@ -11945,6 +11949,7 @@ cc_library( srcs = glob(["lib/Dialect/Transform/IRDLExtension/*.cpp"]), hdrs = glob(["include/mlir/Dialect/Transform/IRDLExtension/*.h"]), deps = [ + ":BytecodeOpInterface", ":IR", ":IRDLDialect", ":IRDLInterfacesIncGen", @@ -11986,7 +11991,9 @@ cc_library( srcs = glob(["lib/Dialect/Transform/DebugExtension/*.cpp"]), hdrs = glob(["include/mlir/Dialect/Transform/DebugExtension/*.h"]), deps = [ + ":BytecodeOpInterface", ":IR", + ":SideEffectInterfaces", ":Support", ":TransformDebugExtensionOpsIncGen", ":TransformDialect", @@ -12023,6 +12030,7 @@ cc_library( srcs = glob(["lib/Dialect/Transform/LoopExtension/*.cpp"]), hdrs = glob(["include/mlir/Dialect/Transform/LoopExtension/*.h"]), deps = [ + ":BytecodeOpInterface", ":IR", ":LoopLikeInterface", ":Rewrite", @@ -13071,6 +13079,7 @@ cc_library( ":MPIOpsIncGen", ":MPITypesIncGen", ":MemRefDialect", + ":SideEffectInterfaces", "//llvm:Support", ], )